[6/77] Make GET_MODE_WIDER return an opt_mode
[official-gcc.git] / gcc / config / ia64 / ia64.c
blob 720c16bd0036e8cd02c5058bbf9e728018732782
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999-2017 Free Software Foundation, Inc.
3 Contributed by James E. Wilson <wilson@cygnus.com> and
4 David Mosberger <davidm@hpl.hp.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "memmodel.h"
30 #include "cfghooks.h"
31 #include "df.h"
32 #include "tm_p.h"
33 #include "stringpool.h"
34 #include "attribs.h"
35 #include "optabs.h"
36 #include "regs.h"
37 #include "emit-rtl.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "alias.h"
41 #include "fold-const.h"
42 #include "stor-layout.h"
43 #include "calls.h"
44 #include "varasm.h"
45 #include "output.h"
46 #include "insn-attr.h"
47 #include "flags.h"
48 #include "explow.h"
49 #include "expr.h"
50 #include "cfgrtl.h"
51 #include "libfuncs.h"
52 #include "sched-int.h"
53 #include "common/common-target.h"
54 #include "langhooks.h"
55 #include "gimplify.h"
56 #include "intl.h"
57 #include "debug.h"
58 #include "params.h"
59 #include "dbgcnt.h"
60 #include "tm-constrs.h"
61 #include "sel-sched.h"
62 #include "reload.h"
63 #include "opts.h"
64 #include "dumpfile.h"
65 #include "builtins.h"
67 /* This file should be included last. */
68 #include "target-def.h"
70 /* This is used for communication between ASM_OUTPUT_LABEL and
71 ASM_OUTPUT_LABELREF. */
72 int ia64_asm_output_label = 0;
74 /* Register names for ia64_expand_prologue. */
75 static const char * const ia64_reg_numbers[96] =
76 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
77 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
78 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
79 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
80 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
81 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
82 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
83 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
84 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
85 "r104","r105","r106","r107","r108","r109","r110","r111",
86 "r112","r113","r114","r115","r116","r117","r118","r119",
87 "r120","r121","r122","r123","r124","r125","r126","r127"};
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_input_reg_names[8] =
91 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
93 /* ??? These strings could be shared with REGISTER_NAMES. */
94 static const char * const ia64_local_reg_names[80] =
95 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
96 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
97 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
98 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
99 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
100 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
101 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
102 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
103 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
104 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
106 /* ??? These strings could be shared with REGISTER_NAMES. */
107 static const char * const ia64_output_reg_names[8] =
108 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
110 /* Variables which are this size or smaller are put in the sdata/sbss
111 sections. */
113 unsigned int ia64_section_threshold;
115 /* The following variable is used by the DFA insn scheduler. The value is
116 TRUE if we do insn bundling instead of insn scheduling. */
117 int bundling_p = 0;
119 enum ia64_frame_regs
121 reg_fp,
122 reg_save_b0,
123 reg_save_pr,
124 reg_save_ar_pfs,
125 reg_save_ar_unat,
126 reg_save_ar_lc,
127 reg_save_gp,
128 number_of_ia64_frame_regs
131 /* Structure to be filled in by ia64_compute_frame_size with register
132 save masks and offsets for the current function. */
134 struct ia64_frame_info
136 HOST_WIDE_INT total_size; /* size of the stack frame, not including
137 the caller's scratch area. */
138 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
139 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
140 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
141 HARD_REG_SET mask; /* mask of saved registers. */
142 unsigned int gr_used_mask; /* mask of registers in use as gr spill
143 registers or long-term scratches. */
144 int n_spilled; /* number of spilled registers. */
145 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
146 int n_input_regs; /* number of input registers used. */
147 int n_local_regs; /* number of local registers used. */
148 int n_output_regs; /* number of output registers used. */
149 int n_rotate_regs; /* number of rotating registers used. */
151 char need_regstk; /* true if a .regstk directive is needed. */
152 char initialized; /* true if the data is finalized. */
155 /* Current frame information calculated by ia64_compute_frame_size. */
156 static struct ia64_frame_info current_frame_info;
157 /* The actual registers that are emitted. */
158 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
160 static int ia64_first_cycle_multipass_dfa_lookahead (void);
161 static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
162 static void ia64_init_dfa_pre_cycle_insn (void);
163 static rtx ia64_dfa_pre_cycle_insn (void);
164 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
165 static int ia64_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *);
166 static void ia64_h_i_d_extended (void);
167 static void * ia64_alloc_sched_context (void);
168 static void ia64_init_sched_context (void *, bool);
169 static void ia64_set_sched_context (void *);
170 static void ia64_clear_sched_context (void *);
171 static void ia64_free_sched_context (void *);
172 static int ia64_mode_to_int (machine_mode);
173 static void ia64_set_sched_flags (spec_info_t);
174 static ds_t ia64_get_insn_spec_ds (rtx_insn *);
175 static ds_t ia64_get_insn_checked_ds (rtx_insn *);
176 static bool ia64_skip_rtx_p (const_rtx);
177 static int ia64_speculate_insn (rtx_insn *, ds_t, rtx *);
178 static bool ia64_needs_block_p (ds_t);
179 static rtx ia64_gen_spec_check (rtx_insn *, rtx_insn *, ds_t);
180 static int ia64_spec_check_p (rtx);
181 static int ia64_spec_check_src_p (rtx);
182 static rtx gen_tls_get_addr (void);
183 static rtx gen_thread_pointer (void);
184 static int find_gr_spill (enum ia64_frame_regs, int);
185 static int next_scratch_gr_reg (void);
186 static void mark_reg_gr_used_mask (rtx, void *);
187 static void ia64_compute_frame_size (HOST_WIDE_INT);
188 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
189 static void finish_spill_pointers (void);
190 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
191 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
192 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
193 static rtx gen_movdi_x (rtx, rtx, rtx);
194 static rtx gen_fr_spill_x (rtx, rtx, rtx);
195 static rtx gen_fr_restore_x (rtx, rtx, rtx);
197 static void ia64_option_override (void);
198 static bool ia64_can_eliminate (const int, const int);
199 static machine_mode hfa_element_mode (const_tree, bool);
200 static void ia64_setup_incoming_varargs (cumulative_args_t, machine_mode,
201 tree, int *, int);
202 static int ia64_arg_partial_bytes (cumulative_args_t, machine_mode,
203 tree, bool);
204 static rtx ia64_function_arg_1 (cumulative_args_t, machine_mode,
205 const_tree, bool, bool);
206 static rtx ia64_function_arg (cumulative_args_t, machine_mode,
207 const_tree, bool);
208 static rtx ia64_function_incoming_arg (cumulative_args_t,
209 machine_mode, const_tree, bool);
210 static void ia64_function_arg_advance (cumulative_args_t, machine_mode,
211 const_tree, bool);
212 static unsigned int ia64_function_arg_boundary (machine_mode,
213 const_tree);
214 static bool ia64_function_ok_for_sibcall (tree, tree);
215 static bool ia64_return_in_memory (const_tree, const_tree);
216 static rtx ia64_function_value (const_tree, const_tree, bool);
217 static rtx ia64_libcall_value (machine_mode, const_rtx);
218 static bool ia64_function_value_regno_p (const unsigned int);
219 static int ia64_register_move_cost (machine_mode, reg_class_t,
220 reg_class_t);
221 static int ia64_memory_move_cost (machine_mode mode, reg_class_t,
222 bool);
223 static bool ia64_rtx_costs (rtx, machine_mode, int, int, int *, bool);
224 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
225 static void fix_range (const char *);
226 static struct machine_function * ia64_init_machine_status (void);
227 static void emit_insn_group_barriers (FILE *);
228 static void emit_all_insn_group_barriers (FILE *);
229 static void final_emit_insn_group_barriers (FILE *);
230 static void emit_predicate_relation_info (void);
231 static void ia64_reorg (void);
232 static bool ia64_in_small_data_p (const_tree);
233 static void process_epilogue (FILE *, rtx, bool, bool);
235 static bool ia64_assemble_integer (rtx, unsigned int, int);
236 static void ia64_output_function_prologue (FILE *);
237 static void ia64_output_function_epilogue (FILE *);
238 static void ia64_output_function_end_prologue (FILE *);
240 static void ia64_print_operand (FILE *, rtx, int);
241 static void ia64_print_operand_address (FILE *, machine_mode, rtx);
242 static bool ia64_print_operand_punct_valid_p (unsigned char code);
244 static int ia64_issue_rate (void);
245 static int ia64_adjust_cost (rtx_insn *, int, rtx_insn *, int, dw_t);
246 static void ia64_sched_init (FILE *, int, int);
247 static void ia64_sched_init_global (FILE *, int, int);
248 static void ia64_sched_finish_global (FILE *, int);
249 static void ia64_sched_finish (FILE *, int);
250 static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
251 static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
252 static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
253 static int ia64_variable_issue (FILE *, int, rtx_insn *, int);
255 static void ia64_asm_unwind_emit (FILE *, rtx_insn *);
256 static void ia64_asm_emit_except_personality (rtx);
257 static void ia64_asm_init_sections (void);
259 static enum unwind_info_type ia64_debug_unwind_info (void);
261 static struct bundle_state *get_free_bundle_state (void);
262 static void free_bundle_state (struct bundle_state *);
263 static void initiate_bundle_states (void);
264 static void finish_bundle_states (void);
265 static int insert_bundle_state (struct bundle_state *);
266 static void initiate_bundle_state_table (void);
267 static void finish_bundle_state_table (void);
268 static int try_issue_nops (struct bundle_state *, int);
269 static int try_issue_insn (struct bundle_state *, rtx);
270 static void issue_nops_and_insn (struct bundle_state *, int, rtx_insn *,
271 int, int);
272 static int get_max_pos (state_t);
273 static int get_template (state_t, int);
275 static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
276 static bool important_for_bundling_p (rtx_insn *);
277 static bool unknown_for_bundling_p (rtx_insn *);
278 static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
280 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
281 HOST_WIDE_INT, tree);
282 static void ia64_file_start (void);
283 static void ia64_globalize_decl_name (FILE *, tree);
285 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
286 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
287 static section *ia64_select_rtx_section (machine_mode, rtx,
288 unsigned HOST_WIDE_INT);
289 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
290 ATTRIBUTE_UNUSED;
291 static unsigned int ia64_section_type_flags (tree, const char *, int);
292 static void ia64_init_libfuncs (void)
293 ATTRIBUTE_UNUSED;
294 static void ia64_hpux_init_libfuncs (void)
295 ATTRIBUTE_UNUSED;
296 static void ia64_sysv4_init_libfuncs (void)
297 ATTRIBUTE_UNUSED;
298 static void ia64_vms_init_libfuncs (void)
299 ATTRIBUTE_UNUSED;
300 static void ia64_soft_fp_init_libfuncs (void)
301 ATTRIBUTE_UNUSED;
302 static bool ia64_vms_valid_pointer_mode (machine_mode mode)
303 ATTRIBUTE_UNUSED;
304 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
305 ATTRIBUTE_UNUSED;
307 static bool ia64_attribute_takes_identifier_p (const_tree);
308 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
309 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
310 static void ia64_encode_section_info (tree, rtx, int);
311 static rtx ia64_struct_value_rtx (tree, int);
312 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
313 static bool ia64_scalar_mode_supported_p (machine_mode mode);
314 static bool ia64_vector_mode_supported_p (machine_mode mode);
315 static bool ia64_legitimate_constant_p (machine_mode, rtx);
316 static bool ia64_legitimate_address_p (machine_mode, rtx, bool);
317 static bool ia64_cannot_force_const_mem (machine_mode, rtx);
318 static const char *ia64_mangle_type (const_tree);
319 static const char *ia64_invalid_conversion (const_tree, const_tree);
320 static const char *ia64_invalid_unary_op (int, const_tree);
321 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
322 static machine_mode ia64_c_mode_for_suffix (char);
323 static void ia64_trampoline_init (rtx, tree, rtx);
324 static void ia64_override_options_after_change (void);
325 static bool ia64_member_type_forces_blk (const_tree, machine_mode);
327 static tree ia64_fold_builtin (tree, int, tree *, bool);
328 static tree ia64_builtin_decl (unsigned, bool);
330 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
331 static machine_mode ia64_get_reg_raw_mode (int regno);
332 static section * ia64_hpux_function_section (tree, enum node_frequency,
333 bool, bool);
335 static bool ia64_vectorize_vec_perm_const_ok (machine_mode vmode,
336 const unsigned char *sel);
338 #define MAX_VECT_LEN 8
340 struct expand_vec_perm_d
342 rtx target, op0, op1;
343 unsigned char perm[MAX_VECT_LEN];
344 machine_mode vmode;
345 unsigned char nelt;
346 bool one_operand_p;
347 bool testing_p;
350 static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
353 /* Table of valid machine attributes. */
354 static const struct attribute_spec ia64_attribute_table[] =
356 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
357 affects_type_identity } */
358 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
359 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
360 false },
361 #if TARGET_ABI_OPEN_VMS
362 { "common_object", 1, 1, true, false, false,
363 ia64_vms_common_object_attribute, false },
364 #endif
365 { "version_id", 1, 1, true, false, false,
366 ia64_handle_version_id_attribute, false },
367 { NULL, 0, 0, false, false, false, NULL, false }
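/* Editorial note (not in the original file): a minimal sketch of how the
   target attributes declared in the table above appear in user code.  The
   argument forms follow the handlers further down in this file; treat the
   exact spellings and the example declarations as illustrative rather than
   authoritative.  */
#if 0
/* "model" takes the identifier `small' (or `__small__') and selects the
   short-addressable data area; see ia64_handle_model_attribute.  */
extern int counter __attribute__ ((model (small)));

/* "syscall_linkage" applies to function types; "version_id" takes a string
   checked by ia64_handle_version_id_attribute.  */
extern int sys_helper (void) __attribute__ ((syscall_linkage));
extern int old_api (void) __attribute__ ((version_id ("20040821")));
#endif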
370 /* Initialize the GCC target structure. */
371 #undef TARGET_ATTRIBUTE_TABLE
372 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
374 #undef TARGET_INIT_BUILTINS
375 #define TARGET_INIT_BUILTINS ia64_init_builtins
377 #undef TARGET_FOLD_BUILTIN
378 #define TARGET_FOLD_BUILTIN ia64_fold_builtin
380 #undef TARGET_EXPAND_BUILTIN
381 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
383 #undef TARGET_BUILTIN_DECL
384 #define TARGET_BUILTIN_DECL ia64_builtin_decl
386 #undef TARGET_ASM_BYTE_OP
387 #define TARGET_ASM_BYTE_OP "\tdata1\t"
388 #undef TARGET_ASM_ALIGNED_HI_OP
389 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
390 #undef TARGET_ASM_ALIGNED_SI_OP
391 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
392 #undef TARGET_ASM_ALIGNED_DI_OP
393 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
394 #undef TARGET_ASM_UNALIGNED_HI_OP
395 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
396 #undef TARGET_ASM_UNALIGNED_SI_OP
397 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
398 #undef TARGET_ASM_UNALIGNED_DI_OP
399 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
400 #undef TARGET_ASM_INTEGER
401 #define TARGET_ASM_INTEGER ia64_assemble_integer
403 #undef TARGET_OPTION_OVERRIDE
404 #define TARGET_OPTION_OVERRIDE ia64_option_override
406 #undef TARGET_ASM_FUNCTION_PROLOGUE
407 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
408 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
409 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
410 #undef TARGET_ASM_FUNCTION_EPILOGUE
411 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
413 #undef TARGET_PRINT_OPERAND
414 #define TARGET_PRINT_OPERAND ia64_print_operand
415 #undef TARGET_PRINT_OPERAND_ADDRESS
416 #define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
417 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
418 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p
420 #undef TARGET_IN_SMALL_DATA_P
421 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
423 #undef TARGET_SCHED_ADJUST_COST
424 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
425 #undef TARGET_SCHED_ISSUE_RATE
426 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
427 #undef TARGET_SCHED_VARIABLE_ISSUE
428 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
429 #undef TARGET_SCHED_INIT
430 #define TARGET_SCHED_INIT ia64_sched_init
431 #undef TARGET_SCHED_FINISH
432 #define TARGET_SCHED_FINISH ia64_sched_finish
433 #undef TARGET_SCHED_INIT_GLOBAL
434 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
435 #undef TARGET_SCHED_FINISH_GLOBAL
436 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
437 #undef TARGET_SCHED_REORDER
438 #define TARGET_SCHED_REORDER ia64_sched_reorder
439 #undef TARGET_SCHED_REORDER2
440 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
442 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
443 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
445 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
446 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
448 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
449 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
450 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
451 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
453 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
454 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
455 ia64_first_cycle_multipass_dfa_lookahead_guard
457 #undef TARGET_SCHED_DFA_NEW_CYCLE
458 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
460 #undef TARGET_SCHED_H_I_D_EXTENDED
461 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
463 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
464 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
466 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
467 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
469 #undef TARGET_SCHED_SET_SCHED_CONTEXT
470 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
472 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
473 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
475 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
476 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
478 #undef TARGET_SCHED_SET_SCHED_FLAGS
479 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
481 #undef TARGET_SCHED_GET_INSN_SPEC_DS
482 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
484 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
485 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
487 #undef TARGET_SCHED_SPECULATE_INSN
488 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
490 #undef TARGET_SCHED_NEEDS_BLOCK_P
491 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
493 #undef TARGET_SCHED_GEN_SPEC_CHECK
494 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
496 #undef TARGET_SCHED_SKIP_RTX_P
497 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
499 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
500 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
501 #undef TARGET_ARG_PARTIAL_BYTES
502 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
503 #undef TARGET_FUNCTION_ARG
504 #define TARGET_FUNCTION_ARG ia64_function_arg
505 #undef TARGET_FUNCTION_INCOMING_ARG
506 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
507 #undef TARGET_FUNCTION_ARG_ADVANCE
508 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
509 #undef TARGET_FUNCTION_ARG_BOUNDARY
510 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
512 #undef TARGET_ASM_OUTPUT_MI_THUNK
513 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
514 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
515 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
517 #undef TARGET_ASM_FILE_START
518 #define TARGET_ASM_FILE_START ia64_file_start
520 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
521 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
523 #undef TARGET_REGISTER_MOVE_COST
524 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
525 #undef TARGET_MEMORY_MOVE_COST
526 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
527 #undef TARGET_RTX_COSTS
528 #define TARGET_RTX_COSTS ia64_rtx_costs
529 #undef TARGET_ADDRESS_COST
530 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
532 #undef TARGET_UNSPEC_MAY_TRAP_P
533 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
535 #undef TARGET_MACHINE_DEPENDENT_REORG
536 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
538 #undef TARGET_ENCODE_SECTION_INFO
539 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
541 #undef TARGET_SECTION_TYPE_FLAGS
542 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
544 #ifdef HAVE_AS_TLS
545 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
546 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
547 #endif
549 /* ??? Investigate. */
550 #if 0
551 #undef TARGET_PROMOTE_PROTOTYPES
552 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
553 #endif
555 #undef TARGET_FUNCTION_VALUE
556 #define TARGET_FUNCTION_VALUE ia64_function_value
557 #undef TARGET_LIBCALL_VALUE
558 #define TARGET_LIBCALL_VALUE ia64_libcall_value
559 #undef TARGET_FUNCTION_VALUE_REGNO_P
560 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
562 #undef TARGET_STRUCT_VALUE_RTX
563 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
564 #undef TARGET_RETURN_IN_MEMORY
565 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
566 #undef TARGET_SETUP_INCOMING_VARARGS
567 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
568 #undef TARGET_STRICT_ARGUMENT_NAMING
569 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
570 #undef TARGET_MUST_PASS_IN_STACK
571 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
572 #undef TARGET_GET_RAW_RESULT_MODE
573 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
574 #undef TARGET_GET_RAW_ARG_MODE
575 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
577 #undef TARGET_MEMBER_TYPE_FORCES_BLK
578 #define TARGET_MEMBER_TYPE_FORCES_BLK ia64_member_type_forces_blk
580 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
581 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
583 #undef TARGET_ASM_UNWIND_EMIT
584 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
585 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
586 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
587 #undef TARGET_ASM_INIT_SECTIONS
588 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
590 #undef TARGET_DEBUG_UNWIND_INFO
591 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
593 #undef TARGET_SCALAR_MODE_SUPPORTED_P
594 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
595 #undef TARGET_VECTOR_MODE_SUPPORTED_P
596 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
598 #undef TARGET_LEGITIMATE_CONSTANT_P
599 #define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
600 #undef TARGET_LEGITIMATE_ADDRESS_P
601 #define TARGET_LEGITIMATE_ADDRESS_P ia64_legitimate_address_p
603 #undef TARGET_LRA_P
604 #define TARGET_LRA_P hook_bool_void_false
606 #undef TARGET_CANNOT_FORCE_CONST_MEM
607 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
609 #undef TARGET_MANGLE_TYPE
610 #define TARGET_MANGLE_TYPE ia64_mangle_type
612 #undef TARGET_INVALID_CONVERSION
613 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
614 #undef TARGET_INVALID_UNARY_OP
615 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
616 #undef TARGET_INVALID_BINARY_OP
617 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
619 #undef TARGET_C_MODE_FOR_SUFFIX
620 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
622 #undef TARGET_CAN_ELIMINATE
623 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
625 #undef TARGET_TRAMPOLINE_INIT
626 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
628 #undef TARGET_CAN_USE_DOLOOP_P
629 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
630 #undef TARGET_INVALID_WITHIN_DOLOOP
631 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
633 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
634 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
636 #undef TARGET_PREFERRED_RELOAD_CLASS
637 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
639 #undef TARGET_DELAY_SCHED2
640 #define TARGET_DELAY_SCHED2 true
642 /* Variable tracking should be run after all optimizations which
643 change order of insns. It also needs a valid CFG. */
644 #undef TARGET_DELAY_VARTRACK
645 #define TARGET_DELAY_VARTRACK true
647 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
648 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
650 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
651 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P ia64_attribute_takes_identifier_p
653 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
654 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 0
656 struct gcc_target targetm = TARGET_INITIALIZER;
658 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
659 identifier as an argument, so the front end shouldn't look it up. */
661 static bool
662 ia64_attribute_takes_identifier_p (const_tree attr_id)
664 if (is_attribute_p ("model", attr_id))
665 return true;
666 #if TARGET_ABI_OPEN_VMS
667 if (is_attribute_p ("common_object", attr_id))
668 return true;
669 #endif
670 return false;
673 typedef enum
675 ADDR_AREA_NORMAL, /* normal address area */
676 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
678 ia64_addr_area;
680 static GTY(()) tree small_ident1;
681 static GTY(()) tree small_ident2;
683 static void
684 init_idents (void)
686 if (small_ident1 == 0)
688 small_ident1 = get_identifier ("small");
689 small_ident2 = get_identifier ("__small__");
693 /* Retrieve the address area that has been chosen for the given decl. */
695 static ia64_addr_area
696 ia64_get_addr_area (tree decl)
698 tree model_attr;
700 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
701 if (model_attr)
703 tree id;
705 init_idents ();
706 id = TREE_VALUE (TREE_VALUE (model_attr));
707 if (id == small_ident1 || id == small_ident2)
708 return ADDR_AREA_SMALL;
710 return ADDR_AREA_NORMAL;
713 static tree
714 ia64_handle_model_attribute (tree *node, tree name, tree args,
715 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
717 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
718 ia64_addr_area area;
719 tree arg, decl = *node;
721 init_idents ();
722 arg = TREE_VALUE (args);
723 if (arg == small_ident1 || arg == small_ident2)
725 addr_area = ADDR_AREA_SMALL;
727 else
729 warning (OPT_Wattributes, "invalid argument of %qE attribute",
730 name);
731 *no_add_attrs = true;
734 switch (TREE_CODE (decl))
736 case VAR_DECL:
737 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
738 == FUNCTION_DECL)
739 && !TREE_STATIC (decl))
741 error_at (DECL_SOURCE_LOCATION (decl),
742 "an address area attribute cannot be specified for "
743 "local variables");
744 *no_add_attrs = true;
746 area = ia64_get_addr_area (decl);
747 if (area != ADDR_AREA_NORMAL && addr_area != area)
749 error ("address area of %q+D conflicts with previous "
750 "declaration", decl);
751 *no_add_attrs = true;
753 break;
755 case FUNCTION_DECL:
756 error_at (DECL_SOURCE_LOCATION (decl),
757 "address area attribute cannot be specified for "
758 "functions");
759 *no_add_attrs = true;
760 break;
762 default:
763 warning (OPT_Wattributes, "%qE attribute ignored",
764 name);
765 *no_add_attrs = true;
766 break;
769 return NULL_TREE;
772 /* Part of the low level implementation of DEC Ada pragma Common_Object which
773 enables the shared use of variables stored in overlaid linker areas
774 corresponding to the use of Fortran COMMON. */
776 static tree
777 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
778 int flags ATTRIBUTE_UNUSED,
779 bool *no_add_attrs)
781 tree decl = *node;
782 tree id;
784 gcc_assert (DECL_P (decl));
786 DECL_COMMON (decl) = 1;
787 id = TREE_VALUE (args);
788 if (TREE_CODE (id) != IDENTIFIER_NODE && TREE_CODE (id) != STRING_CST)
790 error ("%qE attribute requires a string constant argument", name);
791 *no_add_attrs = true;
792 return NULL_TREE;
794 return NULL_TREE;
797 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
799 void
800 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
801 unsigned HOST_WIDE_INT size,
802 unsigned int align)
804 tree attr = DECL_ATTRIBUTES (decl);
806 if (attr)
807 attr = lookup_attribute ("common_object", attr);
808 if (attr)
810 tree id = TREE_VALUE (TREE_VALUE (attr));
811 const char *name;
813 if (TREE_CODE (id) == IDENTIFIER_NODE)
814 name = IDENTIFIER_POINTER (id);
815 else if (TREE_CODE (id) == STRING_CST)
816 name = TREE_STRING_POINTER (id);
817 else
818 abort ();
820 fprintf (file, "\t.vms_common\t\"%s\",", name);
822 else
823 fprintf (file, "%s", COMMON_ASM_OP);
825 /* Code from elfos.h. */
826 assemble_name (file, name);
827 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED",%u",
828 size, align / BITS_PER_UNIT);
830 fputc ('\n', file);
833 static void
834 ia64_encode_addr_area (tree decl, rtx symbol)
836 int flags;
838 flags = SYMBOL_REF_FLAGS (symbol);
839 switch (ia64_get_addr_area (decl))
841 case ADDR_AREA_NORMAL: break;
842 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
843 default: gcc_unreachable ();
845 SYMBOL_REF_FLAGS (symbol) = flags;
848 static void
849 ia64_encode_section_info (tree decl, rtx rtl, int first)
851 default_encode_section_info (decl, rtl, first);
853 /* Careful not to prod global register variables. */
854 if (TREE_CODE (decl) == VAR_DECL
855 && GET_CODE (DECL_RTL (decl)) == MEM
856 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
857 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
858 ia64_encode_addr_area (decl, XEXP (rtl, 0));
861 /* Return 1 if the operands of a move are ok. */
864 ia64_move_ok (rtx dst, rtx src)
866 /* If we're under init_recog_no_volatile, we'll not be able to use
867 memory_operand. So check the code directly and don't worry about
868 the validity of the underlying address, which should have been
869 checked elsewhere anyway. */
870 if (GET_CODE (dst) != MEM)
871 return 1;
872 if (GET_CODE (src) == MEM)
873 return 0;
874 if (register_operand (src, VOIDmode))
875 return 1;
877 /* Otherwise, this must be a constant, and it must be either 0, 0.0 or 1.0. */
878 if (INTEGRAL_MODE_P (GET_MODE (dst)))
879 return src == const0_rtx;
880 else
881 return satisfies_constraint_G (src);
884 /* Return 1 if the operands are ok for a floating point load pair. */
887 ia64_load_pair_ok (rtx dst, rtx src)
889 /* ??? There is a thinko in the implementation of the "x" constraint and the
890 FP_REGS class. The constraint will also reject (reg f30:TI) so we must
891 also return false for it. */
892 if (GET_CODE (dst) != REG
893 || !(FP_REGNO_P (REGNO (dst)) && FP_REGNO_P (REGNO (dst) + 1)))
894 return 0;
895 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
896 return 0;
897 switch (GET_CODE (XEXP (src, 0)))
899 case REG:
900 case POST_INC:
901 break;
902 case POST_DEC:
903 return 0;
904 case POST_MODIFY:
906 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
908 if (GET_CODE (adjust) != CONST_INT
909 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
910 return 0;
912 break;
913 default:
914 abort ();
916 return 1;
920 addp4_optimize_ok (rtx op1, rtx op2)
922 return (basereg_operand (op1, GET_MODE(op1)) !=
923 basereg_operand (op2, GET_MODE(op2)));
926 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
927 Return the length of the field, or <= 0 on failure. */
930 ia64_depz_field_mask (rtx rop, rtx rshift)
932 unsigned HOST_WIDE_INT op = INTVAL (rop);
933 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
935 /* Get rid of the zero bits we're shifting in. */
936 op >>= shift;
938 /* We must now have a solid block of 1's at bit 0. */
939 return exact_log2 (op + 1);
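/* Editorial note (not in the original file): a worked example of the mask
   check above.  For (and (ashift X 3) 0x1f8) we get ROP = 0x1f8 and
   SHIFT = 3; op >>= 3 leaves 0x3f, a solid block of ones at bit 0, and
   exact_log2 (0x3f + 1) = 6, so the dep.z field has position 3 and
   length 6.  A mask such as 0x1e8 leaves holes after the shift, making
   op + 1 a non-power-of-two, and exact_log2 returns -1, i.e. failure.  */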
942 /* Return the TLS model to use for ADDR. */
944 static enum tls_model
945 tls_symbolic_operand_type (rtx addr)
947 enum tls_model tls_kind = TLS_MODEL_NONE;
949 if (GET_CODE (addr) == CONST)
951 if (GET_CODE (XEXP (addr, 0)) == PLUS
952 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
953 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
955 else if (GET_CODE (addr) == SYMBOL_REF)
956 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
958 return tls_kind;
961 /* Returns true if REG (assumed to be a `reg' RTX) is valid for use
962 as a base register. */
964 static inline bool
965 ia64_reg_ok_for_base_p (const_rtx reg, bool strict)
967 if (strict
968 && REGNO_OK_FOR_BASE_P (REGNO (reg)))
969 return true;
970 else if (!strict
971 && (GENERAL_REGNO_P (REGNO (reg))
972 || !HARD_REGISTER_P (reg)))
973 return true;
974 else
975 return false;
978 static bool
979 ia64_legitimate_address_reg (const_rtx reg, bool strict)
981 if ((REG_P (reg) && ia64_reg_ok_for_base_p (reg, strict))
982 || (GET_CODE (reg) == SUBREG && REG_P (XEXP (reg, 0))
983 && ia64_reg_ok_for_base_p (XEXP (reg, 0), strict)))
984 return true;
986 return false;
989 static bool
990 ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
992 if (GET_CODE (disp) == PLUS
993 && rtx_equal_p (reg, XEXP (disp, 0))
994 && (ia64_legitimate_address_reg (XEXP (disp, 1), strict)
995 || (CONST_INT_P (XEXP (disp, 1))
996 && IN_RANGE (INTVAL (XEXP (disp, 1)), -256, 255))))
997 return true;
999 return false;
1002 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
1004 static bool
1005 ia64_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
1006 rtx x, bool strict)
1008 if (ia64_legitimate_address_reg (x, strict))
1009 return true;
1010 else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
1011 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1012 && XEXP (x, 0) != arg_pointer_rtx)
1013 return true;
1014 else if (GET_CODE (x) == POST_MODIFY
1015 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1016 && XEXP (x, 0) != arg_pointer_rtx
1017 && ia64_legitimate_address_disp (XEXP (x, 0), XEXP (x, 1), strict))
1018 return true;
1019 else
1020 return false;
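/* Editorial note (not in the original file): the address forms accepted
   above, spelled out as RTL for reference (BASE is any register that
   passes ia64_legitimate_address_reg, and must not be the arg pointer in
   the autoincrement forms):
     (reg BASE)
     (post_inc (reg BASE))                              -- also post_dec
     (post_modify (reg BASE) (plus (reg BASE) (reg INDEX)))
     (post_modify (reg BASE) (plus (reg BASE) (const_int D)))
                                                        -- -256 <= D <= 255
   There is no plain base + displacement or base + index form; such
   addresses must be computed into a register first.  */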
1023 /* Return true if X is a constant that is valid for some immediate
1024 field in an instruction. */
1026 static bool
1027 ia64_legitimate_constant_p (machine_mode mode, rtx x)
1029 switch (GET_CODE (x))
1031 case CONST_INT:
1032 case LABEL_REF:
1033 return true;
1035 case CONST_DOUBLE:
1036 if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
1037 return true;
1038 return satisfies_constraint_G (x);
1040 case CONST:
1041 case SYMBOL_REF:
1042 /* ??? Short term workaround for PR 28490. We must make the code here
1043 match the code in ia64_expand_move and move_operand, even though they
1044 are both technically wrong. */
1045 if (tls_symbolic_operand_type (x) == 0)
1047 HOST_WIDE_INT addend = 0;
1048 rtx op = x;
1050 if (GET_CODE (op) == CONST
1051 && GET_CODE (XEXP (op, 0)) == PLUS
1052 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1054 addend = INTVAL (XEXP (XEXP (op, 0), 1));
1055 op = XEXP (XEXP (op, 0), 0);
1058 if (any_offset_symbol_operand (op, mode)
1059 || function_operand (op, mode))
1060 return true;
1061 if (aligned_offset_symbol_operand (op, mode))
1062 return (addend & 0x3fff) == 0;
1063 return false;
1065 return false;
1067 case CONST_VECTOR:
1068 if (mode == V2SFmode)
1069 return satisfies_constraint_Y (x);
1071 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1072 && GET_MODE_SIZE (mode) <= 8);
1074 default:
1075 return false;
1079 /* Don't allow TLS addresses to get spilled to memory. */
1081 static bool
1082 ia64_cannot_force_const_mem (machine_mode mode, rtx x)
1084 if (mode == RFmode)
1085 return true;
1086 return tls_symbolic_operand_type (x) != 0;
1089 /* Expand a symbolic constant load. */
1091 bool
1092 ia64_expand_load_address (rtx dest, rtx src)
1094 gcc_assert (GET_CODE (dest) == REG);
1096 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1097 having to pointer-extend the value afterward. Other forms of address
1098 computation below are also more natural to compute as 64-bit quantities.
1099 If we've been given an SImode destination register, change it. */
1100 if (GET_MODE (dest) != Pmode)
1101 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1102 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1104 if (TARGET_NO_PIC)
1105 return false;
1106 if (small_addr_symbolic_operand (src, VOIDmode))
1107 return false;
1109 if (TARGET_AUTO_PIC)
1110 emit_insn (gen_load_gprel64 (dest, src));
1111 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1112 emit_insn (gen_load_fptr (dest, src));
1113 else if (sdata_symbolic_operand (src, VOIDmode))
1114 emit_insn (gen_load_gprel (dest, src));
1115 else if (local_symbolic_operand64 (src, VOIDmode))
1117 /* We want to use @gprel rather than @ltoff relocations for local
1118 symbols:
1119 - @gprel does not require dynamic linker
1120 - and does not use .sdata section
1121 https://gcc.gnu.org/bugzilla/60465 */
1122 emit_insn (gen_load_gprel64 (dest, src));
1124 else
1126 HOST_WIDE_INT addend = 0;
1127 rtx tmp;
1129 /* We did split constant offsets in ia64_expand_move, and we did try
1130 to keep them split in move_operand, but we also allowed reload to
1131 rematerialize arbitrary constants rather than spill the value to
1132 the stack and reload it. So we have to be prepared here to split
1133 them apart again. */
1134 if (GET_CODE (src) == CONST)
1136 HOST_WIDE_INT hi, lo;
1138 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1139 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1140 hi = hi - lo;
1142 if (lo != 0)
1144 addend = lo;
1145 src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
1149 tmp = gen_rtx_HIGH (Pmode, src);
1150 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1151 emit_insn (gen_rtx_SET (dest, tmp));
1153 tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
1154 emit_insn (gen_rtx_SET (dest, tmp));
1156 if (addend)
1158 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1159 emit_insn (gen_rtx_SET (dest, tmp));
1163 return true;
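/* Editorial note (not in the original file): a minimal, self-contained
   sketch of the signed 14-bit addend split used above (the same idiom
   appears again in ia64_expand_tls_address and ia64_expand_move).  The
   helper name is hypothetical.  */
#if 0
static void
example_split_addend (HOST_WIDE_INT x, HOST_WIDE_INT *hi, HOST_WIDE_INT *lo)
{
  /* LO is X sign-extended from its low 14 bits, so it fits a signed
     14-bit immediate; HI is the remainder, always a multiple of 0x4000.
     E.g. x = 0x3000 gives lo = -0x1000, hi = 0x4000, while x = 0x5000
     gives lo = 0x1000, hi = 0x4000.  */
  *lo = ((x & 0x3fff) ^ 0x2000) - 0x2000;
  *hi = x - *lo;
}
#endif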
1166 static GTY(()) rtx gen_tls_tga;
1167 static rtx
1168 gen_tls_get_addr (void)
1170 if (!gen_tls_tga)
1171 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1172 return gen_tls_tga;
1175 static GTY(()) rtx thread_pointer_rtx;
1176 static rtx
1177 gen_thread_pointer (void)
1179 if (!thread_pointer_rtx)
1180 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1181 return thread_pointer_rtx;
1184 static rtx
1185 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1186 rtx orig_op1, HOST_WIDE_INT addend)
1188 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp;
1189 rtx_insn *insns;
1190 rtx orig_op0 = op0;
1191 HOST_WIDE_INT addend_lo, addend_hi;
1193 switch (tls_kind)
1195 case TLS_MODEL_GLOBAL_DYNAMIC:
1196 start_sequence ();
1198 tga_op1 = gen_reg_rtx (Pmode);
1199 emit_insn (gen_load_dtpmod (tga_op1, op1));
1201 tga_op2 = gen_reg_rtx (Pmode);
1202 emit_insn (gen_load_dtprel (tga_op2, op1));
1204 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1205 LCT_CONST, Pmode, 2, tga_op1,
1206 Pmode, tga_op2, Pmode);
1208 insns = get_insns ();
1209 end_sequence ();
1211 if (GET_MODE (op0) != Pmode)
1212 op0 = tga_ret;
1213 emit_libcall_block (insns, op0, tga_ret, op1);
1214 break;
1216 case TLS_MODEL_LOCAL_DYNAMIC:
1217 /* ??? This isn't the completely proper way to do local-dynamic.
1218 If the call to __tls_get_addr is used only by a single symbol,
1219 then we should (somehow) move the dtprel to the second arg
1220 to avoid the extra add. */
1221 start_sequence ();
1223 tga_op1 = gen_reg_rtx (Pmode);
1224 emit_insn (gen_load_dtpmod (tga_op1, op1));
1226 tga_op2 = const0_rtx;
1228 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1229 LCT_CONST, Pmode, 2, tga_op1,
1230 Pmode, tga_op2, Pmode);
1232 insns = get_insns ();
1233 end_sequence ();
1235 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1236 UNSPEC_LD_BASE);
1237 tmp = gen_reg_rtx (Pmode);
1238 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1240 if (!register_operand (op0, Pmode))
1241 op0 = gen_reg_rtx (Pmode);
1242 if (TARGET_TLS64)
1244 emit_insn (gen_load_dtprel (op0, op1));
1245 emit_insn (gen_adddi3 (op0, tmp, op0));
1247 else
1248 emit_insn (gen_add_dtprel (op0, op1, tmp));
1249 break;
1251 case TLS_MODEL_INITIAL_EXEC:
1252 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1253 addend_hi = addend - addend_lo;
1255 op1 = plus_constant (Pmode, op1, addend_hi);
1256 addend = addend_lo;
1258 tmp = gen_reg_rtx (Pmode);
1259 emit_insn (gen_load_tprel (tmp, op1));
1261 if (!register_operand (op0, Pmode))
1262 op0 = gen_reg_rtx (Pmode);
1263 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1264 break;
1266 case TLS_MODEL_LOCAL_EXEC:
1267 if (!register_operand (op0, Pmode))
1268 op0 = gen_reg_rtx (Pmode);
1270 op1 = orig_op1;
1271 addend = 0;
1272 if (TARGET_TLS64)
1274 emit_insn (gen_load_tprel (op0, op1));
1275 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1277 else
1278 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1279 break;
1281 default:
1282 gcc_unreachable ();
1285 if (addend)
1286 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1287 orig_op0, 1, OPTAB_DIRECT);
1288 if (orig_op0 == op0)
1289 return NULL_RTX;
1290 if (GET_MODE (orig_op0) == Pmode)
1291 return op0;
1292 return gen_lowpart (GET_MODE (orig_op0), op0);
1296 ia64_expand_move (rtx op0, rtx op1)
1298 machine_mode mode = GET_MODE (op0);
1300 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1301 op1 = force_reg (mode, op1);
1303 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1305 HOST_WIDE_INT addend = 0;
1306 enum tls_model tls_kind;
1307 rtx sym = op1;
1309 if (GET_CODE (op1) == CONST
1310 && GET_CODE (XEXP (op1, 0)) == PLUS
1311 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1313 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1314 sym = XEXP (XEXP (op1, 0), 0);
1317 tls_kind = tls_symbolic_operand_type (sym);
1318 if (tls_kind)
1319 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1321 if (any_offset_symbol_operand (sym, mode))
1322 addend = 0;
1323 else if (aligned_offset_symbol_operand (sym, mode))
1325 HOST_WIDE_INT addend_lo, addend_hi;
1327 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1328 addend_hi = addend - addend_lo;
1330 if (addend_lo != 0)
1332 op1 = plus_constant (mode, sym, addend_hi);
1333 addend = addend_lo;
1335 else
1336 addend = 0;
1338 else
1339 op1 = sym;
1341 if (reload_completed)
1343 /* We really should have taken care of this offset earlier. */
1344 gcc_assert (addend == 0);
1345 if (ia64_expand_load_address (op0, op1))
1346 return NULL_RTX;
1349 if (addend)
1351 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1353 emit_insn (gen_rtx_SET (subtarget, op1));
1355 op1 = expand_simple_binop (mode, PLUS, subtarget,
1356 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1357 if (op0 == op1)
1358 return NULL_RTX;
1362 return op1;
1365 /* Split a move from OP1 to OP0 conditional on COND. */
1367 void
1368 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1370 rtx_insn *insn, *first = get_last_insn ();
1372 emit_move_insn (op0, op1);
1374 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1375 if (INSN_P (insn))
1376 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1377 PATTERN (insn));
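/* Editorial note (not in the original file): the loop above predicates
   every insn emitted for the move.  For example, a simple register copy
     (set (reg:DI r14) (reg:DI r15))
   becomes
     (cond_exec (ne (reg:BI p6) (const_int 0))
                (set (reg:DI r14) (reg:DI r15)))
   when COND is (ne (reg:BI p6) (const_int 0)).  */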
1380 /* Split a post-reload TImode or TFmode reference into two DImode
1381 components. This is made extra difficult by the fact that we do
1382 not get any scratch registers to work with, because reload cannot
1383 be prevented from giving us a scratch that overlaps the register
1384 pair involved. So instead, when addressing memory, we tweak the
1385 pointer register up and back down with POST_INCs. Or up and not
1386 back down when we can get away with it.
1388 REVERSED is true when the loads must be done in reversed order
1389 (high word first) for correctness. DEAD is true when the pointer
1390 dies with the second insn we generate and therefore the second
1391 address must not carry a postmodify.
1393 May return an insn which is to be emitted after the moves. */
1395 static rtx
1396 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1398 rtx fixup = 0;
1400 switch (GET_CODE (in))
1402 case REG:
1403 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1404 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1405 break;
1407 case CONST_INT:
1408 case CONST_DOUBLE:
1409 /* Cannot occur reversed. */
1410 gcc_assert (!reversed);
1412 if (GET_MODE (in) != TFmode)
1413 split_double (in, &out[0], &out[1]);
1414 else
1415 /* split_double does not understand how to split a TFmode
1416 quantity into a pair of DImode constants. */
1418 unsigned HOST_WIDE_INT p[2];
1419 long l[4]; /* TFmode is 128 bits */
1421 real_to_target (l, CONST_DOUBLE_REAL_VALUE (in), TFmode);
1423 if (FLOAT_WORDS_BIG_ENDIAN)
1425 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1426 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1428 else
1430 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1431 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1433 out[0] = GEN_INT (p[0]);
1434 out[1] = GEN_INT (p[1]);
1436 break;
1438 case MEM:
1440 rtx base = XEXP (in, 0);
1441 rtx offset;
1443 switch (GET_CODE (base))
1445 case REG:
1446 if (!reversed)
1448 out[0] = adjust_automodify_address
1449 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1450 out[1] = adjust_automodify_address
1451 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1453 else
1455 /* Reversal requires a pre-increment, which can only
1456 be done as a separate insn. */
1457 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1458 out[0] = adjust_automodify_address
1459 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1460 out[1] = adjust_address (in, DImode, 0);
1462 break;
1464 case POST_INC:
1465 gcc_assert (!reversed && !dead);
1467 /* Just do the increment in two steps. */
1468 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1469 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1470 break;
1472 case POST_DEC:
1473 gcc_assert (!reversed && !dead);
1475 /* Add 8, subtract 24. */
1476 base = XEXP (base, 0);
1477 out[0] = adjust_automodify_address
1478 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1479 out[1] = adjust_automodify_address
1480 (in, DImode,
1481 gen_rtx_POST_MODIFY (Pmode, base,
1482 plus_constant (Pmode, base, -24)),
1484 break;
1486 case POST_MODIFY:
1487 gcc_assert (!reversed && !dead);
1489 /* Extract and adjust the modification. This case is
1490 trickier than the others, because we might have an
1491 index register, or we might have a combined offset that
1492 doesn't fit a signed 9-bit displacement field. We can
1493 assume the incoming expression is already legitimate. */
1494 offset = XEXP (base, 1);
1495 base = XEXP (base, 0);
1497 out[0] = adjust_automodify_address
1498 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1500 if (GET_CODE (XEXP (offset, 1)) == REG)
1502 /* Can't adjust the postmodify to match. Emit the
1503 original, then a separate addition insn. */
1504 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1505 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1507 else
1509 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1510 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1512 /* Again the postmodify cannot be made to match,
1513 but in this case it's more efficient to get rid
1514 of the postmodify entirely and fix up with an
1515 add insn. */
1516 out[1] = adjust_automodify_address (in, DImode, base, 8);
1517 fixup = gen_adddi3
1518 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1520 else
1522 /* Combined offset still fits in the displacement field.
1523 (We cannot overflow it at the high end.) */
1524 out[1] = adjust_automodify_address
1525 (in, DImode, gen_rtx_POST_MODIFY
1526 (Pmode, base, gen_rtx_PLUS
1527 (Pmode, base,
1528 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1532 break;
1534 default:
1535 gcc_unreachable ();
1537 break;
1540 default:
1541 gcc_unreachable ();
1544 return fixup;
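/* Editorial note (not in the original file): for the common case of a
   TImode access through a plain register base, the function above turns
     (mem:TI (reg:DI r14))
   into two DImode pieces roughly like
     out[0] = (mem:DI (post_inc:DI (reg:DI r14)))   -- accesses [r14], r14 += 8
     out[1] = (mem:DI (post_dec:DI (reg:DI r14)))   -- accesses [r14], r14 -= 8
   (the post_dec is dropped when DEAD is set), so r14 is unchanged overall
   and no scratch register is ever needed.  */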
1547 /* Split a TImode or TFmode move instruction after reload.
1548 This is used by *movtf_internal and *movti_internal. */
1549 void
1550 ia64_split_tmode_move (rtx operands[])
1552 rtx in[2], out[2], insn;
1553 rtx fixup[2];
1554 bool dead = false;
1555 bool reversed = false;
1557 /* It is possible for reload to decide to overwrite a pointer with
1558 the value it points to. In that case we have to do the loads in
1559 the appropriate order so that the pointer is not destroyed too
1560 early. Also we must not generate a postmodify for that second
1561 load, or rws_access_regno will die. And we must not generate a
1562 postmodify for the second load if the destination register
1563 overlaps with the base register. */
1564 if (GET_CODE (operands[1]) == MEM
1565 && reg_overlap_mentioned_p (operands[0], operands[1]))
1567 rtx base = XEXP (operands[1], 0);
1568 while (GET_CODE (base) != REG)
1569 base = XEXP (base, 0);
1571 if (REGNO (base) == REGNO (operands[0]))
1572 reversed = true;
1574 if (refers_to_regno_p (REGNO (operands[0]),
1575 REGNO (operands[0])+2,
1576 base, 0))
1577 dead = true;
1579 /* Another reason to do the moves in reversed order is if the first
1580 element of the target register pair is also the second element of
1581 the source register pair. */
1582 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1583 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1584 reversed = true;
1586 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1587 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1589 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1590 if (GET_CODE (EXP) == MEM \
1591 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1592 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1593 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1594 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1596 insn = emit_insn (gen_rtx_SET (out[0], in[0]));
1597 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1598 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1600 insn = emit_insn (gen_rtx_SET (out[1], in[1]));
1601 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1602 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1604 if (fixup[0])
1605 emit_insn (fixup[0]);
1606 if (fixup[1])
1607 emit_insn (fixup[1]);
1609 #undef MAYBE_ADD_REG_INC_NOTE
1612 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1613 through memory plus an extra GR scratch register. Except that you can
1614 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1615 SECONDARY_RELOAD_CLASS, but not both.
1617 We got into problems in the first place by allowing a construct like
1618 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1619 This solution attempts to prevent this situation from occurring. When
1620 we see something like the above, we spill the inner register to memory. */
1622 static rtx
1623 spill_xfmode_rfmode_operand (rtx in, int force, machine_mode mode)
1625 if (GET_CODE (in) == SUBREG
1626 && GET_MODE (SUBREG_REG (in)) == TImode
1627 && GET_CODE (SUBREG_REG (in)) == REG)
1629 rtx memt = assign_stack_temp (TImode, 16);
1630 emit_move_insn (memt, SUBREG_REG (in));
1631 return adjust_address (memt, mode, 0);
1633 else if (force && GET_CODE (in) == REG)
1635 rtx memx = assign_stack_temp (mode, 16);
1636 emit_move_insn (memx, in);
1637 return memx;
1639 else
1640 return in;
1643 /* Expand the movxf or movrf pattern (MODE says which) with the given
1644 OPERANDS, returning true if the pattern should then invoke
1645 DONE. */
1647 bool
1648 ia64_expand_movxf_movrf (machine_mode mode, rtx operands[])
1650 rtx op0 = operands[0];
1652 if (GET_CODE (op0) == SUBREG)
1653 op0 = SUBREG_REG (op0);
1655 /* We must support XFmode loads into general registers for stdarg/vararg,
1656 unprototyped calls, and a rare case where a long double is passed as
1657 an argument after a float HFA fills the FP registers. We split them into
1658 DImode loads for convenience. We also need to support XFmode stores
1659 for the last case. This case does not happen for stdarg/vararg routines,
1660 because we do a block store to memory of unnamed arguments. */
1662 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1664 rtx out[2];
1666 /* We're hoping to transform everything that deals with XFmode
1667 quantities and GR registers early in the compiler. */
1668 gcc_assert (can_create_pseudo_p ());
1670 /* Struct to register can just use TImode instead. */
1671 if ((GET_CODE (operands[1]) == SUBREG
1672 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1673 || (GET_CODE (operands[1]) == REG
1674 && GR_REGNO_P (REGNO (operands[1]))))
1676 rtx op1 = operands[1];
1678 if (GET_CODE (op1) == SUBREG)
1679 op1 = SUBREG_REG (op1);
1680 else
1681 op1 = gen_rtx_REG (TImode, REGNO (op1));
1683 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1684 return true;
1687 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1689 /* Don't word-swap when reading in the constant. */
1690 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1691 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1692 0, mode));
1693 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1694 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1695 0, mode));
1696 return true;
1699 /* If the quantity is in a register not known to be GR, spill it. */
1700 if (register_operand (operands[1], mode))
1701 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1703 gcc_assert (GET_CODE (operands[1]) == MEM);
1705 /* Don't word-swap when reading in the value. */
1706 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1707 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1709 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1710 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1711 return true;
1714 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1716 /* We're hoping to transform everything that deals with XFmode
1717 quantities and GR registers early in the compiler. */
1718 gcc_assert (can_create_pseudo_p ());
1720 /* Op0 can't be a GR_REG here, as that case is handled above.
1721 If op0 is a register, then we spill op1, so that we now have a
1722 MEM operand. This requires creating an XFmode subreg of a TImode reg
1723 to force the spill. */
1724 if (register_operand (operands[0], mode))
1726 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1727 op1 = gen_rtx_SUBREG (mode, op1, 0);
1728 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1731 else
1733 rtx in[2];
1735 gcc_assert (GET_CODE (operands[0]) == MEM);
1737 /* Don't word-swap when writing out the value. */
1738 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1739 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1741 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1742 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1743 return true;
1747 if (!reload_in_progress && !reload_completed)
1749 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1751 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1753 rtx memt, memx, in = operands[1];
1754 if (CONSTANT_P (in))
1755 in = validize_mem (force_const_mem (mode, in));
1756 if (GET_CODE (in) == MEM)
1757 memt = adjust_address (in, TImode, 0);
1758 else
1760 memt = assign_stack_temp (TImode, 16);
1761 memx = adjust_address (memt, mode, 0);
1762 emit_move_insn (memx, in);
1764 emit_move_insn (op0, memt);
1765 return true;
1768 if (!ia64_move_ok (operands[0], operands[1]))
1769 operands[1] = force_reg (mode, operands[1]);
1772 return false;
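
/* A minimal sketch (kept under "#if 0", not built) of the GR-register case
   above: an XFmode load into a general register pair is emitted as two
   DImode loads at offsets 0 and 8, with no word swapping.  */
#if 0
static void
example_xfmode_load_to_gr_pair (unsigned long out[2], const unsigned long *mem)
{
  out[0] = mem[0];	/* adjust_address (operands[1], DImode, 0) */
  out[1] = mem[1];	/* adjust_address (operands[1], DImode, 8) */
}
#endif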
1775 /* Emit a comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1776 with the expression that holds the compare result (in VOIDmode). */
1778 static GTY(()) rtx cmptf_libfunc;
1780 void
1781 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1783 enum rtx_code code = GET_CODE (*expr);
1784 rtx cmp;
1786 /* If we have a BImode input, then we already have a compare result, and
1787 do not need to emit another comparison. */
1788 if (GET_MODE (*op0) == BImode)
1790 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1791 cmp = *op0;
1793 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1794 magic number as its third argument indicating what to do.
1795 The return value is an integer to be compared against zero.  */
1796 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1798 enum qfcmp_magic {
1799 QCMP_INV = 1, /* Raise FP_INVALID on NaNs as a side effect. */
1800 QCMP_UNORD = 2,
1801 QCMP_EQ = 4,
1802 QCMP_LT = 8,
1803 QCMP_GT = 16
1805 int magic;
1806 enum rtx_code ncode;
1807 rtx ret;
1809 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1810 switch (code)
1812 /* 1 = equal, 0 = not equal. Equality operators do
1813 not raise FP_INVALID when given a NaN operand. */
1814 case EQ: magic = QCMP_EQ; ncode = NE; break;
1815 case NE: magic = QCMP_EQ; ncode = EQ; break;
1816 /* isunordered() from C99. */
1817 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1818 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1819 /* Relational operators raise FP_INVALID when given
1820 a NaN operand. */
1821 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1822 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1823 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1824 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1825 /* Unordered relational operators do not raise FP_INVALID
1826 when given a NaN operand. */
1827 case UNLT: magic = QCMP_LT |QCMP_UNORD; ncode = NE; break;
1828 case UNLE: magic = QCMP_LT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1829 case UNGT: magic = QCMP_GT |QCMP_UNORD; ncode = NE; break;
1830 case UNGE: magic = QCMP_GT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1831 /* Not supported. */
1832 case UNEQ:
1833 case LTGT:
1834 default: gcc_unreachable ();
1837 start_sequence ();
1839 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1840 *op0, TFmode, *op1, TFmode,
1841 GEN_INT (magic), DImode);
1842 cmp = gen_reg_rtx (BImode);
1843 emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (ncode, BImode,
1844 ret, const0_rtx)));
1846 rtx_insn *insns = get_insns ();
1847 end_sequence ();
1849 emit_libcall_block (insns, cmp, cmp,
1850 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1851 code = NE;
1853 else
1855 cmp = gen_reg_rtx (BImode);
1856 emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1857 code = NE;
1860 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1861 *op0 = cmp;
1862 *op1 = const0_rtx;
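
/* A minimal sketch (kept under "#if 0", not built) of how the qfcmp magic
   bits above combine for one case.  The QCMP_* values are local to
   ia64_expand_compare, so they are restated here purely for illustration.  */
#if 0
static int
example_qfcmp_magic_for_le (void)
{
  const int qcmp_inv = 1, qcmp_eq = 4, qcmp_lt = 8;	/* mirror the enum */
  /* LE is a relational operator, so it raises FP_INVALID on NaNs and thus
     includes the INV bit: 8 | 4 | 1 == 13, matching the LE case in the
     switch above.  The branch then tests the library result with NE.  */
  return qcmp_lt | qcmp_eq | qcmp_inv;
}
#endif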
1865 /* Generate an integral vector comparison. Return true if the condition has
1866 been reversed, and so the sense of the comparison should be inverted. */
1868 static bool
1869 ia64_expand_vecint_compare (enum rtx_code code, machine_mode mode,
1870 rtx dest, rtx op0, rtx op1)
1872 bool negate = false;
1873 rtx x;
1875 /* Canonicalize the comparison to EQ, GT, GTU. */
1876 switch (code)
1878 case EQ:
1879 case GT:
1880 case GTU:
1881 break;
1883 case NE:
1884 case LE:
1885 case LEU:
1886 code = reverse_condition (code);
1887 negate = true;
1888 break;
1890 case GE:
1891 case GEU:
1892 code = reverse_condition (code);
1893 negate = true;
1894 /* FALLTHRU */
1896 case LT:
1897 case LTU:
1898 code = swap_condition (code);
1899 x = op0, op0 = op1, op1 = x;
1900 break;
1902 default:
1903 gcc_unreachable ();
1906 /* Unsigned parallel compare is not supported by the hardware. Play some
1907 tricks to turn this into a signed comparison against 0. */
1908 if (code == GTU)
1910 switch (mode)
1912 case E_V2SImode:
1914 rtx t1, t2, mask;
1916 /* Subtract (-(INT MAX) - 1) from both operands to make
1917 them signed. */
1918 mask = gen_int_mode (0x80000000, SImode);
1919 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1920 mask = force_reg (mode, mask);
1921 t1 = gen_reg_rtx (mode);
1922 emit_insn (gen_subv2si3 (t1, op0, mask));
1923 t2 = gen_reg_rtx (mode);
1924 emit_insn (gen_subv2si3 (t2, op1, mask));
1925 op0 = t1;
1926 op1 = t2;
1927 code = GT;
1929 break;
1931 case E_V8QImode:
1932 case E_V4HImode:
1933 /* Perform a parallel unsigned saturating subtraction. */
1934 x = gen_reg_rtx (mode);
1935 emit_insn (gen_rtx_SET (x, gen_rtx_US_MINUS (mode, op0, op1)));
1937 code = EQ;
1938 op0 = x;
1939 op1 = CONST0_RTX (mode);
1940 negate = !negate;
1941 break;
1943 default:
1944 gcc_unreachable ();
1948 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1949 emit_insn (gen_rtx_SET (dest, x));
1951 return negate;
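
/* A minimal sketch (kept under "#if 0", not built) of the V2SImode GTU trick
   above, written for a single 32-bit lane: subtracting 0x80000000 from both
   operands (equivalently, flipping the sign bit) turns the unsigned
   comparison into the corresponding signed one.  */
#if 0
static int
example_gtu_via_signed_gt (unsigned int a, unsigned int b)
{
  int sa = (int) (a - 0x80000000u);	/* same bit pattern as a ^ 0x80000000 */
  int sb = (int) (b - 0x80000000u);
  return sa > sb;			/* equal to (a > b) as unsigned */
}
#endif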
1954 /* Emit an integral vector conditional move. */
1956 void
1957 ia64_expand_vecint_cmov (rtx operands[])
1959 machine_mode mode = GET_MODE (operands[0]);
1960 enum rtx_code code = GET_CODE (operands[3]);
1961 bool negate;
1962 rtx cmp, x, ot, of;
1964 cmp = gen_reg_rtx (mode);
1965 negate = ia64_expand_vecint_compare (code, mode, cmp,
1966 operands[4], operands[5]);
1968 ot = operands[1+negate];
1969 of = operands[2-negate];
1971 if (ot == CONST0_RTX (mode))
1973 if (of == CONST0_RTX (mode))
1975 emit_move_insn (operands[0], ot);
1976 return;
1979 x = gen_rtx_NOT (mode, cmp);
1980 x = gen_rtx_AND (mode, x, of);
1981 emit_insn (gen_rtx_SET (operands[0], x));
1983 else if (of == CONST0_RTX (mode))
1985 x = gen_rtx_AND (mode, cmp, ot);
1986 emit_insn (gen_rtx_SET (operands[0], x));
1988 else
1990 rtx t, f;
1992 t = gen_reg_rtx (mode);
1993 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1994 emit_insn (gen_rtx_SET (t, x));
1996 f = gen_reg_rtx (mode);
1997 x = gen_rtx_NOT (mode, cmp);
1998 x = gen_rtx_AND (mode, x, operands[2-negate]);
1999 emit_insn (gen_rtx_SET (f, x));
2001 x = gen_rtx_IOR (mode, t, f);
2002 emit_insn (gen_rtx_SET (operands[0], x));
2006 /* Emit an integral vector min or max operation. Return true if all done. */
2008 bool
2009 ia64_expand_vecint_minmax (enum rtx_code code, machine_mode mode,
2010 rtx operands[])
2012 rtx xops[6];
2014 /* These four combinations are supported directly. */
2015 if (mode == V8QImode && (code == UMIN || code == UMAX))
2016 return false;
2017 if (mode == V4HImode && (code == SMIN || code == SMAX))
2018 return false;
2020 /* This combination can be implemented with only saturating subtraction. */
2021 if (mode == V4HImode && code == UMAX)
2023 rtx x, tmp = gen_reg_rtx (mode);
2025 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
2026 emit_insn (gen_rtx_SET (tmp, x));
2028 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
2029 return true;
2032 /* Everything else implemented via vector comparisons. */
2033 xops[0] = operands[0];
2034 xops[4] = xops[1] = operands[1];
2035 xops[5] = xops[2] = operands[2];
2037 switch (code)
2039 case UMIN:
2040 code = LTU;
2041 break;
2042 case UMAX:
2043 code = GTU;
2044 break;
2045 case SMIN:
2046 code = LT;
2047 break;
2048 case SMAX:
2049 code = GT;
2050 break;
2051 default:
2052 gcc_unreachable ();
2054 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
2056 ia64_expand_vecint_cmov (xops);
2057 return true;
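
/* A minimal sketch (kept under "#if 0", not built) of the V4HImode UMAX
   identity used above, for one 16-bit lane: max (a, b) == (a -sat b) + b,
   because the saturating difference is a - b when a >= b and 0 otherwise.  */
#if 0
static unsigned short
example_umax_via_saturating_sub (unsigned short a, unsigned short b)
{
  unsigned short d = a > b ? (unsigned short) (a - b) : 0;	/* a -sat b */
  return (unsigned short) (d + b);				/* max (a, b) */
}
#endif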
2060 /* The vectors LO and HI each contain N halves of a double-wide vector.
2061 Reassemble either the first N/2 or the second N/2 elements. */
2063 void
2064 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
2066 machine_mode vmode = GET_MODE (lo);
2067 unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
2068 struct expand_vec_perm_d d;
2069 bool ok;
2071 d.target = gen_lowpart (vmode, out);
2072 d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
2073 d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
2074 d.vmode = vmode;
2075 d.nelt = nelt;
2076 d.one_operand_p = false;
2077 d.testing_p = false;
2079 high = (highp ? nelt / 2 : 0);
2080 for (i = 0; i < nelt / 2; ++i)
2082 d.perm[i * 2] = i + high;
2083 d.perm[i * 2 + 1] = i + high + nelt;
2086 ok = ia64_expand_vec_perm_const_1 (&d);
2087 gcc_assert (ok);
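
/* A minimal sketch (kept under "#if 0", not built) of the selector built in
   ia64_unpack_assemble for nelt == 8: with HIGHP set it interleaves the high
   halves of the two inputs, giving { 4, 12, 5, 13, 6, 14, 7, 15 }.  */
#if 0
static void
example_unpack_perm (unsigned char perm[8], int highp)
{
  unsigned int i, nelt = 8, high = highp ? nelt / 2 : 0;
  for (i = 0; i < nelt / 2; ++i)
    {
      perm[i * 2] = i + high;		/* element taken from op0 */
      perm[i * 2 + 1] = i + high + nelt;	/* element taken from op1 */
    }
}
#endif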
2090 /* Return a vector holding the sign bits of VEC (all-ones for negative
elements, all-zeros otherwise), or the zero vector if UNSIGNEDP.  */
2092 static rtx
2093 ia64_unpack_sign (rtx vec, bool unsignedp)
2095 machine_mode mode = GET_MODE (vec);
2096 rtx zero = CONST0_RTX (mode);
2098 if (unsignedp)
2099 return zero;
2100 else
2102 rtx sign = gen_reg_rtx (mode);
2103 bool neg;
2105 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2106 gcc_assert (!neg);
2108 return sign;
2112 /* Emit an integral vector unpack operation. */
2114 void
2115 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2117 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2118 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
2121 /* Emit an integral vector widening sum operation.  */
2123 void
2124 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2126 machine_mode wmode;
2127 rtx l, h, t, sign;
2129 sign = ia64_unpack_sign (operands[1], unsignedp);
2131 wmode = GET_MODE (operands[0]);
2132 l = gen_reg_rtx (wmode);
2133 h = gen_reg_rtx (wmode);
2135 ia64_unpack_assemble (l, operands[1], sign, false);
2136 ia64_unpack_assemble (h, operands[1], sign, true);
2138 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2139 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2140 if (t != operands[0])
2141 emit_move_insn (operands[0], t);
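
/* A minimal sketch (kept under "#if 0", not built) of the widening-sum
   expansion above for 4 x 16-bit lanes accumulated into 2 x 32-bit lanes:
   the input is unpacked into sign-extended low and high halves, and both
   halves are added into the accumulator.  The exact lane pairing is
   endian-dependent and does not matter for the reduction this feeds.  */
#if 0
static void
example_widen_ssum_v4hi (const short in[4], int acc[2])
{
  int lo[2] = { in[0], in[1] };		/* unpack low, sign-extended */
  int hi[2] = { in[2], in[3] };		/* unpack high, sign-extended */
  acc[0] += lo[0] + hi[0];
  acc[1] += lo[1] + hi[1];
}
#endif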
2144 /* Emit the appropriate sequence for a call. */
2146 void
2147 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2148 int sibcall_p)
2150 rtx insn, b0;
2152 addr = XEXP (addr, 0);
2153 addr = convert_memory_address (DImode, addr);
2154 b0 = gen_rtx_REG (DImode, R_BR (0));
2156 /* ??? Should do this for functions known to bind locally too.  */
2157 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2159 if (sibcall_p)
2160 insn = gen_sibcall_nogp (addr);
2161 else if (! retval)
2162 insn = gen_call_nogp (addr, b0);
2163 else
2164 insn = gen_call_value_nogp (retval, addr, b0);
2165 insn = emit_call_insn (insn);
2167 else
2169 if (sibcall_p)
2170 insn = gen_sibcall_gp (addr);
2171 else if (! retval)
2172 insn = gen_call_gp (addr, b0);
2173 else
2174 insn = gen_call_value_gp (retval, addr, b0);
2175 insn = emit_call_insn (insn);
2177 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2180 if (sibcall_p)
2181 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2183 if (TARGET_ABI_OPEN_VMS)
2184 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2185 gen_rtx_REG (DImode, GR_REG (25)));
2188 static void
2189 reg_emitted (enum ia64_frame_regs r)
2191 if (emitted_frame_related_regs[r] == 0)
2192 emitted_frame_related_regs[r] = current_frame_info.r[r];
2193 else
2194 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2197 static int
2198 get_reg (enum ia64_frame_regs r)
2200 reg_emitted (r);
2201 return current_frame_info.r[r];
2204 static bool
2205 is_emitted (int regno)
2207 unsigned int r;
2209 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2210 if (emitted_frame_related_regs[r] == regno)
2211 return true;
2212 return false;
2215 void
2216 ia64_reload_gp (void)
2218 rtx tmp;
2220 if (current_frame_info.r[reg_save_gp])
2222 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2224 else
2226 HOST_WIDE_INT offset;
2227 rtx offset_r;
2229 offset = (current_frame_info.spill_cfa_off
2230 + current_frame_info.spill_size);
2231 if (frame_pointer_needed)
2233 tmp = hard_frame_pointer_rtx;
2234 offset = -offset;
2236 else
2238 tmp = stack_pointer_rtx;
2239 offset = current_frame_info.total_size - offset;
2242 offset_r = GEN_INT (offset);
2243 if (satisfies_constraint_I (offset_r))
2244 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2245 else
2247 emit_move_insn (pic_offset_table_rtx, offset_r);
2248 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2249 pic_offset_table_rtx, tmp));
2252 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2255 emit_move_insn (pic_offset_table_rtx, tmp);
2258 void
2259 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2260 rtx scratch_b, int noreturn_p, int sibcall_p)
2262 rtx insn;
2263 bool is_desc = false;
2265 /* If we find we're calling through a register, then we're actually
2266 calling through a descriptor, so load up the values. */
2267 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2269 rtx tmp;
2270 bool addr_dead_p;
2272 /* ??? We are currently constrained to *not* use peep2, because
2273 we can legitimately change the global lifetime of the GP
2274 (in the form of killing where previously live). This is
2275 because a call through a descriptor doesn't use the previous
2276 value of the GP, while a direct call does, and we do not
2277 commit to either form until the split here.
2279 That said, this means that we lack precise life info for
2280 whether ADDR is dead after this call. This is not terribly
2281 important, since we can fix things up essentially for free
2282 with the POST_DEC below, but it's nice to not use it when we
2283 can immediately tell it's not necessary. */
2284 addr_dead_p = ((noreturn_p || sibcall_p
2285 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2286 REGNO (addr)))
2287 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2289 /* Load the code address into scratch_b. */
2290 tmp = gen_rtx_POST_INC (Pmode, addr);
2291 tmp = gen_rtx_MEM (Pmode, tmp);
2292 emit_move_insn (scratch_r, tmp);
2293 emit_move_insn (scratch_b, scratch_r);
2295 /* Load the GP address. If ADDR is not dead here, then we must
2296 revert the change made above via the POST_INCREMENT. */
2297 if (!addr_dead_p)
2298 tmp = gen_rtx_POST_DEC (Pmode, addr);
2299 else
2300 tmp = addr;
2301 tmp = gen_rtx_MEM (Pmode, tmp);
2302 emit_move_insn (pic_offset_table_rtx, tmp);
2304 is_desc = true;
2305 addr = scratch_b;
2308 if (sibcall_p)
2309 insn = gen_sibcall_nogp (addr);
2310 else if (retval)
2311 insn = gen_call_value_nogp (retval, addr, retaddr);
2312 else
2313 insn = gen_call_nogp (addr, retaddr);
2314 emit_call_insn (insn);
2316 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2317 ia64_reload_gp ();
2320 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2322 This differs from the generic code in that we know about the zero-extending
2323 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2324 also know that ld.acq+cmpxchg.rel equals a full barrier.
2326 The loop we want to generate looks like
2328 cmp_reg = mem;
2329 label:
2330 old_reg = cmp_reg;
2331 new_reg = cmp_reg op val;
2332 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2333 if (cmp_reg != old_reg)
2334 goto label;
2336 Note that we only do the plain load from memory once. Subsequent
2337 iterations use the value loaded by the compare-and-swap pattern. */
2339 void
2340 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2341 rtx old_dst, rtx new_dst, enum memmodel model)
2343 machine_mode mode = GET_MODE (mem);
2344 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2345 enum insn_code icode;
2347 /* Special case for using fetchadd. */
2348 if ((mode == SImode || mode == DImode)
2349 && (code == PLUS || code == MINUS)
2350 && fetchadd_operand (val, mode))
2352 if (code == MINUS)
2353 val = GEN_INT (-INTVAL (val));
2355 if (!old_dst)
2356 old_dst = gen_reg_rtx (mode);
2358 switch (model)
2360 case MEMMODEL_ACQ_REL:
2361 case MEMMODEL_SEQ_CST:
2362 case MEMMODEL_SYNC_SEQ_CST:
2363 emit_insn (gen_memory_barrier ());
2364 /* FALLTHRU */
2365 case MEMMODEL_RELAXED:
2366 case MEMMODEL_ACQUIRE:
2367 case MEMMODEL_SYNC_ACQUIRE:
2368 case MEMMODEL_CONSUME:
2369 if (mode == SImode)
2370 icode = CODE_FOR_fetchadd_acq_si;
2371 else
2372 icode = CODE_FOR_fetchadd_acq_di;
2373 break;
2374 case MEMMODEL_RELEASE:
2375 case MEMMODEL_SYNC_RELEASE:
2376 if (mode == SImode)
2377 icode = CODE_FOR_fetchadd_rel_si;
2378 else
2379 icode = CODE_FOR_fetchadd_rel_di;
2380 break;
2382 default:
2383 gcc_unreachable ();
2386 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2388 if (new_dst)
2390 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2391 true, OPTAB_WIDEN);
2392 if (new_reg != new_dst)
2393 emit_move_insn (new_dst, new_reg);
2395 return;
2398 /* Because of the volatile mem read, we get an ld.acq, which is the
2399 front half of the full barrier. The end half is the cmpxchg.rel.
2400 For relaxed and release memory models, we don't need this. But we
2401 also don't bother trying to prevent it either. */
2402 gcc_assert (is_mm_relaxed (model) || is_mm_release (model)
2403 || MEM_VOLATILE_P (mem));
2405 old_reg = gen_reg_rtx (DImode);
2406 cmp_reg = gen_reg_rtx (DImode);
2407 label = gen_label_rtx ();
2409 if (mode != DImode)
2411 val = simplify_gen_subreg (DImode, val, mode, 0);
2412 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2414 else
2415 emit_move_insn (cmp_reg, mem);
2417 emit_label (label);
2419 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2420 emit_move_insn (old_reg, cmp_reg);
2421 emit_move_insn (ar_ccv, cmp_reg);
2423 if (old_dst)
2424 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2426 new_reg = cmp_reg;
2427 if (code == NOT)
2429 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2430 true, OPTAB_DIRECT);
2431 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2433 else
2434 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2435 true, OPTAB_DIRECT);
2437 if (mode != DImode)
2438 new_reg = gen_lowpart (mode, new_reg);
2439 if (new_dst)
2440 emit_move_insn (new_dst, new_reg);
2442 switch (model)
2444 case MEMMODEL_RELAXED:
2445 case MEMMODEL_ACQUIRE:
2446 case MEMMODEL_SYNC_ACQUIRE:
2447 case MEMMODEL_CONSUME:
2448 switch (mode)
2450 case E_QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
2451 case E_HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
2452 case E_SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
2453 case E_DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
2454 default:
2455 gcc_unreachable ();
2457 break;
2459 case MEMMODEL_RELEASE:
2460 case MEMMODEL_SYNC_RELEASE:
2461 case MEMMODEL_ACQ_REL:
2462 case MEMMODEL_SEQ_CST:
2463 case MEMMODEL_SYNC_SEQ_CST:
2464 switch (mode)
2466 case E_QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2467 case E_HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2468 case E_SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2469 case E_DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2470 default:
2471 gcc_unreachable ();
2473 break;
2475 default:
2476 gcc_unreachable ();
2479 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2481 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
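
/* A minimal sketch (kept under "#if 0", not built) of the loop the expander
   above generates, expressed with the generic __sync compare-and-swap
   builtin; the real expansion works on DImode values and ar.ccv directly.  */
#if 0
static long
example_atomic_fetch_or (volatile long *mem, long val)
{
  long cmp = *mem;		/* the single plain load */
  long old;
  do
    {
      old = cmp;
      cmp = __sync_val_compare_and_swap (mem, old, old | val);
    }
  while (cmp != old);
  return old;			/* previous value, as for old_dst */
}
#endif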
2484 /* Begin the assembly file. */
2486 static void
2487 ia64_file_start (void)
2489 default_file_start ();
2490 emit_safe_across_calls ();
2493 void
2494 emit_safe_across_calls (void)
2496 unsigned int rs, re;
2497 int out_state;
2499 rs = 1;
2500 out_state = 0;
2501 while (1)
2503 while (rs < 64 && call_used_regs[PR_REG (rs)])
2504 rs++;
2505 if (rs >= 64)
2506 break;
2507 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2508 continue;
2509 if (out_state == 0)
2511 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2512 out_state = 1;
2514 else
2515 fputc (',', asm_out_file);
2516 if (re == rs + 1)
2517 fprintf (asm_out_file, "p%u", rs);
2518 else
2519 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2520 rs = re + 1;
2522 if (out_state)
2523 fputc ('\n', asm_out_file);
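
/* Illustrative example of the output produced above: if predicates p1-p5 and
   p16-p63 are the ones not marked call-used, this emits
   "\t.pred.safe_across_calls p1-p5,p16-p63\n".  */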
2526 /* Globalize a declaration. */
2528 static void
2529 ia64_globalize_decl_name (FILE * stream, tree decl)
2531 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2532 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2533 if (version_attr)
2535 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2536 const char *p = TREE_STRING_POINTER (v);
2537 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2539 targetm.asm_out.globalize_label (stream, name);
2540 if (TREE_CODE (decl) == FUNCTION_DECL)
2541 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2544 /* Helper function for ia64_compute_frame_size: find an appropriate general
2545 register to spill the special register R to.  Registers in GR0 to GR31 that
2546 have already been allocated by this routine are tracked in
2547 current_frame_info.gr_used_mask.  TRY_LOCALS is true if we should attempt
to locate a local regnum.  */
2549 static int
2550 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2552 int regno;
2554 if (emitted_frame_related_regs[r] != 0)
2556 regno = emitted_frame_related_regs[r];
2557 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2558 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2559 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2560 else if (crtl->is_leaf
2561 && regno >= GR_REG (1) && regno <= GR_REG (31))
2562 current_frame_info.gr_used_mask |= 1 << regno;
2564 return regno;
2567 /* If this is a leaf function, first try an otherwise unused
2568 call-clobbered register. */
2569 if (crtl->is_leaf)
2571 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2572 if (! df_regs_ever_live_p (regno)
2573 && call_used_regs[regno]
2574 && ! fixed_regs[regno]
2575 && ! global_regs[regno]
2576 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2577 && ! is_emitted (regno))
2579 current_frame_info.gr_used_mask |= 1 << regno;
2580 return regno;
2584 if (try_locals)
2586 regno = current_frame_info.n_local_regs;
2587 /* If there is a frame pointer, then we can't use loc79, because
2588 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2589 reg_name switching code in ia64_expand_prologue. */
2590 while (regno < (80 - frame_pointer_needed))
2591 if (! is_emitted (LOC_REG (regno++)))
2593 current_frame_info.n_local_regs = regno;
2594 return LOC_REG (regno - 1);
2598 /* Failed to find a general register to spill to. Must use stack. */
2599 return 0;
2602 /* In order to make for nice schedules, we try to allocate every temporary
2603 to a different register. We must of course stay away from call-saved,
2604 fixed, and global registers. We must also stay away from registers
2605 allocated in current_frame_info.gr_used_mask, since those include regs
2606 used all through the prologue.
2608 Any register allocated here must be used immediately. The idea is to
2609 aid scheduling, not to solve data flow problems. */
2611 static int last_scratch_gr_reg;
2613 static int
2614 next_scratch_gr_reg (void)
2616 int i, regno;
2618 for (i = 0; i < 32; ++i)
2620 regno = (last_scratch_gr_reg + i + 1) & 31;
2621 if (call_used_regs[regno]
2622 && ! fixed_regs[regno]
2623 && ! global_regs[regno]
2624 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2626 last_scratch_gr_reg = regno;
2627 return regno;
2631 /* There must be _something_ available. */
2632 gcc_unreachable ();
2635 /* Helper function for ia64_compute_frame_size, called through
2636 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2638 static void
2639 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2641 unsigned int regno = REGNO (reg);
2642 if (regno < 32)
2644 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2645 for (i = 0; i < n; ++i)
2646 current_frame_info.gr_used_mask |= 1 << (regno + i);
2651 /* Compute the frame layout for the current function and record it in
2652 current_frame_info, including the offset between the frame pointer and the
2653 stack pointer.  SIZE is the number of bytes of space needed for local
variables.  */
2655 static void
2656 ia64_compute_frame_size (HOST_WIDE_INT size)
2658 HOST_WIDE_INT total_size;
2659 HOST_WIDE_INT spill_size = 0;
2660 HOST_WIDE_INT extra_spill_size = 0;
2661 HOST_WIDE_INT pretend_args_size;
2662 HARD_REG_SET mask;
2663 int n_spilled = 0;
2664 int spilled_gr_p = 0;
2665 int spilled_fr_p = 0;
2666 unsigned int regno;
2667 int min_regno;
2668 int max_regno;
2669 int i;
2671 if (current_frame_info.initialized)
2672 return;
2674 memset (&current_frame_info, 0, sizeof current_frame_info);
2675 CLEAR_HARD_REG_SET (mask);
2677 /* Don't allocate scratches to the return register. */
2678 diddle_return_value (mark_reg_gr_used_mask, NULL);
2680 /* Don't allocate scratches to the EH scratch registers. */
2681 if (cfun->machine->ia64_eh_epilogue_sp)
2682 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2683 if (cfun->machine->ia64_eh_epilogue_bsp)
2684 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2686 /* Static stack checking uses r2 and r3. */
2687 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
2688 current_frame_info.gr_used_mask |= 0xc;
2690 /* Find the size of the register stack frame. We have only 80 local
2691 registers, because we reserve 8 for the inputs and 8 for the
2692 outputs. */
2694 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2695 since we'll be adjusting that down later. */
2696 regno = LOC_REG (78) + ! frame_pointer_needed;
2697 for (; regno >= LOC_REG (0); regno--)
2698 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2699 break;
2700 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2702 /* For functions marked with the syscall_linkage attribute, we must mark
2703 all eight input registers as in use, so that locals aren't visible to
2704 the caller. */
2706 if (cfun->machine->n_varargs > 0
2707 || lookup_attribute ("syscall_linkage",
2708 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2709 current_frame_info.n_input_regs = 8;
2710 else
2712 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2713 if (df_regs_ever_live_p (regno))
2714 break;
2715 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2718 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2719 if (df_regs_ever_live_p (regno))
2720 break;
2721 i = regno - OUT_REG (0) + 1;
2723 #ifndef PROFILE_HOOK
2724 /* When -p profiling, we need one output register for the mcount argument.
2725 Likewise for -a profiling for the bb_init_func argument. For -ax
2726 profiling, we need two output registers for the two bb_init_trace_func
2727 arguments. */
2728 if (crtl->profile)
2729 i = MAX (i, 1);
2730 #endif
2731 current_frame_info.n_output_regs = i;
2733 /* ??? No rotating register support yet. */
2734 current_frame_info.n_rotate_regs = 0;
2736 /* Discover which registers need spilling, and how much room that
2737 will take. Begin with floating point and general registers,
2738 which will always wind up on the stack. */
2740 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2741 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2743 SET_HARD_REG_BIT (mask, regno);
2744 spill_size += 16;
2745 n_spilled += 1;
2746 spilled_fr_p = 1;
2749 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2750 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2752 SET_HARD_REG_BIT (mask, regno);
2753 spill_size += 8;
2754 n_spilled += 1;
2755 spilled_gr_p = 1;
2758 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2759 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2761 SET_HARD_REG_BIT (mask, regno);
2762 spill_size += 8;
2763 n_spilled += 1;
2766 /* Now come all special registers that might get saved in other
2767 general registers. */
2769 if (frame_pointer_needed)
2771 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2772 /* If we did not get a register, then we take LOC79. This is guaranteed
2773 to be free, even if regs_ever_live is already set, because this is
2774 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2775 as we don't count loc79 above. */
2776 if (current_frame_info.r[reg_fp] == 0)
2778 current_frame_info.r[reg_fp] = LOC_REG (79);
2779 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2783 if (! crtl->is_leaf)
2785 /* Emit a save of BR0 if we call other functions. Do this even
2786 if this function doesn't return, as EH depends on this to be
2787 able to unwind the stack. */
2788 SET_HARD_REG_BIT (mask, BR_REG (0));
2790 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2791 if (current_frame_info.r[reg_save_b0] == 0)
2793 extra_spill_size += 8;
2794 n_spilled += 1;
2797 /* Similarly for ar.pfs. */
2798 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2799 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2800 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2802 extra_spill_size += 8;
2803 n_spilled += 1;
2806 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2807 registers are clobbered, so we fall back to the stack. */
2808 current_frame_info.r[reg_save_gp]
2809 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2810 if (current_frame_info.r[reg_save_gp] == 0)
2812 SET_HARD_REG_BIT (mask, GR_REG (1));
2813 spill_size += 8;
2814 n_spilled += 1;
2817 else
2819 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2821 SET_HARD_REG_BIT (mask, BR_REG (0));
2822 extra_spill_size += 8;
2823 n_spilled += 1;
2826 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2828 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2829 current_frame_info.r[reg_save_ar_pfs]
2830 = find_gr_spill (reg_save_ar_pfs, 1);
2831 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2833 extra_spill_size += 8;
2834 n_spilled += 1;
2839 /* Unwind descriptor hackery: things are most efficient if we allocate
2840 consecutive GR save registers for RP, PFS, FP in that order. However,
2841 it is absolutely critical that FP get the only hard register that's
2842 guaranteed to be free, so we allocated it first. If all three did
2843 happen to be allocated hard regs, and are consecutive, rearrange them
2844 into the preferred order now.
2846 If we have already emitted code for any of those registers,
2847 then it's already too late to change. */
2848 min_regno = MIN (current_frame_info.r[reg_fp],
2849 MIN (current_frame_info.r[reg_save_b0],
2850 current_frame_info.r[reg_save_ar_pfs]));
2851 max_regno = MAX (current_frame_info.r[reg_fp],
2852 MAX (current_frame_info.r[reg_save_b0],
2853 current_frame_info.r[reg_save_ar_pfs]));
2854 if (min_regno > 0
2855 && min_regno + 2 == max_regno
2856 && (current_frame_info.r[reg_fp] == min_regno + 1
2857 || current_frame_info.r[reg_save_b0] == min_regno + 1
2858 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2859 && (emitted_frame_related_regs[reg_save_b0] == 0
2860 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2861 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2862 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2863 && (emitted_frame_related_regs[reg_fp] == 0
2864 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2866 current_frame_info.r[reg_save_b0] = min_regno;
2867 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2868 current_frame_info.r[reg_fp] = min_regno + 2;
2871 /* See if we need to store the predicate register block. */
2872 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2873 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2874 break;
2875 if (regno <= PR_REG (63))
2877 SET_HARD_REG_BIT (mask, PR_REG (0));
2878 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2879 if (current_frame_info.r[reg_save_pr] == 0)
2881 extra_spill_size += 8;
2882 n_spilled += 1;
2885 /* ??? Mark them all as used so that register renaming and such
2886 are free to use them. */
2887 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2888 df_set_regs_ever_live (regno, true);
2891 /* If we're forced to use st8.spill, we're forced to save and restore
2892 ar.unat as well. The check for existing liveness allows inline asm
2893 to touch ar.unat. */
2894 if (spilled_gr_p || cfun->machine->n_varargs
2895 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2897 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2898 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2899 current_frame_info.r[reg_save_ar_unat]
2900 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2901 if (current_frame_info.r[reg_save_ar_unat] == 0)
2903 extra_spill_size += 8;
2904 n_spilled += 1;
2908 if (df_regs_ever_live_p (AR_LC_REGNUM))
2910 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2911 current_frame_info.r[reg_save_ar_lc]
2912 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2913 if (current_frame_info.r[reg_save_ar_lc] == 0)
2915 extra_spill_size += 8;
2916 n_spilled += 1;
2920 /* If we have an odd number of words of pretend arguments written to
2921 the stack, then the FR save area will be unaligned. We round the
2922 size of this area up to keep things 16 byte aligned. */
2923 if (spilled_fr_p)
2924 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2925 else
2926 pretend_args_size = crtl->args.pretend_args_size;
2928 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2929 + crtl->outgoing_args_size);
2930 total_size = IA64_STACK_ALIGN (total_size);
2932 /* We always use the 16-byte scratch area provided by the caller, but
2933 if we are a leaf function, there's no one to which we need to provide
2934 a scratch area. However, if the function allocates dynamic stack space,
2935 the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
2936 so we need to cope. */
2937 if (crtl->is_leaf && !cfun->calls_alloca)
2938 total_size = MAX (0, total_size - 16);
2940 current_frame_info.total_size = total_size;
2941 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2942 current_frame_info.spill_size = spill_size;
2943 current_frame_info.extra_spill_size = extra_spill_size;
2944 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2945 current_frame_info.n_spilled = n_spilled;
2946 current_frame_info.initialized = reload_completed;
2949 /* Worker function for TARGET_CAN_ELIMINATE. */
2951 bool
2952 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2954 return (to == BR_REG (0) ? crtl->is_leaf : true);
2957 /* Compute the initial difference between the specified pair of registers. */
2959 HOST_WIDE_INT
2960 ia64_initial_elimination_offset (int from, int to)
2962 HOST_WIDE_INT offset;
2964 ia64_compute_frame_size (get_frame_size ());
2965 switch (from)
2967 case FRAME_POINTER_REGNUM:
2968 switch (to)
2970 case HARD_FRAME_POINTER_REGNUM:
2971 offset = -current_frame_info.total_size;
2972 if (!crtl->is_leaf || cfun->calls_alloca)
2973 offset += 16 + crtl->outgoing_args_size;
2974 break;
2976 case STACK_POINTER_REGNUM:
2977 offset = 0;
2978 if (!crtl->is_leaf || cfun->calls_alloca)
2979 offset += 16 + crtl->outgoing_args_size;
2980 break;
2982 default:
2983 gcc_unreachable ();
2985 break;
2987 case ARG_POINTER_REGNUM:
2988 /* Arguments start above the 16 byte save area, unless this is a stdarg
2989 function, in which case we store through the 16 byte save area.  */
2990 switch (to)
2992 case HARD_FRAME_POINTER_REGNUM:
2993 offset = 16 - crtl->args.pretend_args_size;
2994 break;
2996 case STACK_POINTER_REGNUM:
2997 offset = (current_frame_info.total_size
2998 + 16 - crtl->args.pretend_args_size);
2999 break;
3001 default:
3002 gcc_unreachable ();
3004 break;
3006 default:
3007 gcc_unreachable ();
3010 return offset;
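
/* Worked example of the offsets above (illustrative, assuming a leaf function
   with total_size == 0, no pretend args and no outgoing args):
   FRAME_POINTER_REGNUM -> STACK_POINTER_REGNUM gives 0, while
   ARG_POINTER_REGNUM -> STACK_POINTER_REGNUM gives 0 + 16 - 0 == 16, i.e.
   incoming arguments sit just above the caller's 16 byte scratch area.  */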
3013 /* If there are more than a trivial number of register spills, we use
3014 two interleaved iterators so that we can get two memory references
3015 per insn group.
3017 In order to simplify things in the prologue and epilogue expanders,
3018 we use helper functions to fix up the memory references after the
3019 fact with the appropriate offsets to a POST_MODIFY memory mode.
3020 The following data structure tracks the state of the two iterators
3021 while insns are being emitted. */
3023 struct spill_fill_data
3025 rtx_insn *init_after; /* point at which to emit initializations */
3026 rtx init_reg[2]; /* initial base register */
3027 rtx iter_reg[2]; /* the iterator registers */
3028 rtx *prev_addr[2]; /* address of last memory use */
3029 rtx_insn *prev_insn[2]; /* the insn corresponding to prev_addr */
3030 HOST_WIDE_INT prev_off[2]; /* last offset */
3031 int n_iter; /* number of iterators in use */
3032 int next_iter; /* next iterator to use */
3033 unsigned int save_gr_used_mask;
3036 static struct spill_fill_data spill_fill_data;
3038 static void
3039 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
3041 int i;
3043 spill_fill_data.init_after = get_last_insn ();
3044 spill_fill_data.init_reg[0] = init_reg;
3045 spill_fill_data.init_reg[1] = init_reg;
3046 spill_fill_data.prev_addr[0] = NULL;
3047 spill_fill_data.prev_addr[1] = NULL;
3048 spill_fill_data.prev_insn[0] = NULL;
3049 spill_fill_data.prev_insn[1] = NULL;
3050 spill_fill_data.prev_off[0] = cfa_off;
3051 spill_fill_data.prev_off[1] = cfa_off;
3052 spill_fill_data.next_iter = 0;
3053 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3055 spill_fill_data.n_iter = 1 + (n_spills > 2);
3056 for (i = 0; i < spill_fill_data.n_iter; ++i)
3058 int regno = next_scratch_gr_reg ();
3059 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3060 current_frame_info.gr_used_mask |= 1 << regno;
3064 static void
3065 finish_spill_pointers (void)
3067 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3070 static rtx
3071 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3073 int iter = spill_fill_data.next_iter;
3074 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3075 rtx disp_rtx = GEN_INT (disp);
3076 rtx mem;
3078 if (spill_fill_data.prev_addr[iter])
3080 if (satisfies_constraint_N (disp_rtx))
3082 *spill_fill_data.prev_addr[iter]
3083 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3084 gen_rtx_PLUS (DImode,
3085 spill_fill_data.iter_reg[iter],
3086 disp_rtx));
3087 add_reg_note (spill_fill_data.prev_insn[iter],
3088 REG_INC, spill_fill_data.iter_reg[iter]);
3090 else
3092 /* ??? Could use register post_modify for loads. */
3093 if (!satisfies_constraint_I (disp_rtx))
3095 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3096 emit_move_insn (tmp, disp_rtx);
3097 disp_rtx = tmp;
3099 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3100 spill_fill_data.iter_reg[iter], disp_rtx));
3103 /* Micro-optimization: if we've created a frame pointer, it's at
3104 CFA 0, which may allow the real iterator to be initialized lower,
3105 slightly increasing parallelism. Also, if there are few saves
3106 it may eliminate the iterator entirely. */
3107 else if (disp == 0
3108 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3109 && frame_pointer_needed)
3111 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3112 set_mem_alias_set (mem, get_varargs_alias_set ());
3113 return mem;
3115 else
3117 rtx seq;
3118 rtx_insn *insn;
3120 if (disp == 0)
3121 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3122 spill_fill_data.init_reg[iter]);
3123 else
3125 start_sequence ();
3127 if (!satisfies_constraint_I (disp_rtx))
3129 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3130 emit_move_insn (tmp, disp_rtx);
3131 disp_rtx = tmp;
3134 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3135 spill_fill_data.init_reg[iter],
3136 disp_rtx));
3138 seq = get_insns ();
3139 end_sequence ();
3142 /* Careful for being the first insn in a sequence. */
3143 if (spill_fill_data.init_after)
3144 insn = emit_insn_after (seq, spill_fill_data.init_after);
3145 else
3147 rtx_insn *first = get_insns ();
3148 if (first)
3149 insn = emit_insn_before (seq, first);
3150 else
3151 insn = emit_insn (seq);
3153 spill_fill_data.init_after = insn;
3156 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3158 /* ??? Not all of the spills are for varargs, but some of them are.
3159 The rest of the spills belong in an alias set of their own. But
3160 it doesn't actually hurt to include them here. */
3161 set_mem_alias_set (mem, get_varargs_alias_set ());
3163 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3164 spill_fill_data.prev_off[iter] = cfa_off;
3166 if (++iter >= spill_fill_data.n_iter)
3167 iter = 0;
3168 spill_fill_data.next_iter = iter;
3170 return mem;
3173 static void
3174 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3175 rtx frame_reg)
3177 int iter = spill_fill_data.next_iter;
3178 rtx mem;
3179 rtx_insn *insn;
3181 mem = spill_restore_mem (reg, cfa_off);
3182 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3183 spill_fill_data.prev_insn[iter] = insn;
3185 if (frame_reg)
3187 rtx base;
3188 HOST_WIDE_INT off;
3190 RTX_FRAME_RELATED_P (insn) = 1;
3192 /* Don't even pretend that the unwind code can intuit its way
3193 through a pair of interleaved post_modify iterators. Just
3194 provide the correct answer. */
3196 if (frame_pointer_needed)
3198 base = hard_frame_pointer_rtx;
3199 off = - cfa_off;
3201 else
3203 base = stack_pointer_rtx;
3204 off = current_frame_info.total_size - cfa_off;
3207 add_reg_note (insn, REG_CFA_OFFSET,
3208 gen_rtx_SET (gen_rtx_MEM (GET_MODE (reg),
3209 plus_constant (Pmode,
3210 base, off)),
3211 frame_reg));
3215 static void
3216 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3218 int iter = spill_fill_data.next_iter;
3219 rtx_insn *insn;
3221 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3222 GEN_INT (cfa_off)));
3223 spill_fill_data.prev_insn[iter] = insn;
3226 /* Wrapper functions that discard the CONST_INT spill offset.  These
3227 exist so that we can give gr_spill/gr_fill the offset they need and
3228 use a consistent function interface. */
3230 static rtx
3231 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3233 return gen_movdi (dest, src);
3236 static rtx
3237 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3239 return gen_fr_spill (dest, src);
3242 static rtx
3243 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3245 return gen_fr_restore (dest, src);
3248 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
3250 /* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
3251 #define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
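
/* Worked example of the macro above: for N == 96 stacked registers,
   BACKING_STORE_SIZE (96) == (96 + 96/63 + 1) * 8 == (96 + 1 + 1) * 8
   == 784 bytes; the extra words allow for the NaT collections the RSE
   stores alongside the stacked registers (see the table cited above).  */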
3253 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
3254 inclusive. These are offsets from the current stack pointer. BS_SIZE
3255 is the size of the backing store. ??? This clobbers r2 and r3. */
3257 static void
3258 ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
3259 int bs_size)
3261 rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
3262 rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
3263 rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
3265 /* On the IA-64 there is a second stack in memory, namely the Backing Store
3266 of the Register Stack Engine. We also need to probe it after checking
3267 that the 2 stacks don't overlap. */
3268 emit_insn (gen_bsp_value (r3));
3269 emit_move_insn (r2, GEN_INT (-(first + size)));
3271 /* Compare current value of BSP and SP registers. */
3272 emit_insn (gen_rtx_SET (p6, gen_rtx_fmt_ee (LTU, BImode,
3273 r3, stack_pointer_rtx)));
3275 /* Compute the address of the probe for the Backing Store (which grows
3276 towards higher addresses). We probe only at the first offset of
3277 the next page because some OSes (e.g. Linux/ia64) only extend the
3278 backing store when this specific address is hit (but generate a SEGV
3279 on other addresses).  Page size is the worst case (4KB).  The reserve
3280 size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
3281 Also compute the address of the last probe for the memory stack
3282 (which grows towards lower addresses). */
3283 emit_insn (gen_rtx_SET (r3, plus_constant (Pmode, r3, 4095)));
3284 emit_insn (gen_rtx_SET (r2, gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3286 /* Compare them and raise SEGV if the former has topped the latter. */
3287 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3288 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3289 gen_rtx_SET (p6, gen_rtx_fmt_ee (GEU, BImode,
3290 r3, r2))));
3291 emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
3292 const0_rtx),
3293 const0_rtx));
3294 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3295 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3296 gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
3297 GEN_INT (11))));
3299 /* Probe the Backing Store if necessary. */
3300 if (bs_size > 0)
3301 emit_stack_probe (r3);
3303 /* Probe the memory stack if necessary. */
3304 if (size == 0)
3307 /* See if we have a constant small number of probes to generate. If so,
3308 that's the easy case. */
3309 else if (size <= PROBE_INTERVAL)
3310 emit_stack_probe (r2);
3312 /* The run-time loop is made up of 9 insns in the generic case while this
3313 compile-time loop is made up of 5+2*(n-2) insns, where n is the number of intervals.  */
3314 else if (size <= 4 * PROBE_INTERVAL)
3316 HOST_WIDE_INT i;
3318 emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
3319 emit_insn (gen_rtx_SET (r2,
3320 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3321 emit_stack_probe (r2);
3323 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
3324 it exceeds SIZE. If only two probes are needed, this will not
3325 generate any code. Then probe at FIRST + SIZE. */
3326 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
3328 emit_insn (gen_rtx_SET (r2,
3329 plus_constant (Pmode, r2, -PROBE_INTERVAL)));
3330 emit_stack_probe (r2);
3333 emit_insn (gen_rtx_SET (r2,
3334 plus_constant (Pmode, r2,
3335 (i - PROBE_INTERVAL) - size)));
3336 emit_stack_probe (r2);
3339 /* Otherwise, do the same as above, but in a loop. Note that we must be
3340 extra careful with variables wrapping around because we might be at
3341 the very top (or the very bottom) of the address space and we have
3342 to be able to handle this case properly; in particular, we use an
3343 equality test for the loop condition. */
3344 else
3346 HOST_WIDE_INT rounded_size;
3348 emit_move_insn (r2, GEN_INT (-first));
3351 /* Step 1: round SIZE to the previous multiple of the interval. */
3353 rounded_size = size & -PROBE_INTERVAL;
3356 /* Step 2: compute initial and final value of the loop counter. */
3358 /* TEST_ADDR = SP + FIRST. */
3359 emit_insn (gen_rtx_SET (r2,
3360 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3362 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
3363 if (rounded_size > (1 << 21))
3365 emit_move_insn (r3, GEN_INT (-rounded_size));
3366 emit_insn (gen_rtx_SET (r3, gen_rtx_PLUS (Pmode, r2, r3)));
3368 else
3369 emit_insn (gen_rtx_SET (r3, gen_rtx_PLUS (Pmode, r2,
3370 GEN_INT (-rounded_size))));
3373 /* Step 3: the loop
3377 do { TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
3378      probe at TEST_ADDR }
3380 while (TEST_ADDR != LAST_ADDR)
3382 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
3383 until it is equal to ROUNDED_SIZE.  */
3385 emit_insn (gen_probe_stack_range (r2, r2, r3));
3388 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
3389 that SIZE is equal to ROUNDED_SIZE. */
3391 /* TEMP = SIZE - ROUNDED_SIZE. */
3392 if (size != rounded_size)
3394 emit_insn (gen_rtx_SET (r2, plus_constant (Pmode, r2,
3395 rounded_size - size)));
3396 emit_stack_probe (r2);
3400 /* Make sure nothing is scheduled before we are done. */
3401 emit_insn (gen_blockage ());
3404 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
3405 absolute addresses. */
3407 const char *
3408 output_probe_stack_range (rtx reg1, rtx reg2)
3410 static int labelno = 0;
3411 char loop_lab[32];
3412 rtx xops[3];
3414 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
3416 /* Loop. */
3417 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
3419 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
3420 xops[0] = reg1;
3421 xops[1] = GEN_INT (-PROBE_INTERVAL);
3422 output_asm_insn ("addl %0 = %1, %0", xops);
3423 fputs ("\t;;\n", asm_out_file);
3425 /* Probe at TEST_ADDR. */
3426 output_asm_insn ("probe.w.fault %0, 0", xops);
3428 /* Test if TEST_ADDR == LAST_ADDR. */
3429 xops[1] = reg2;
3430 xops[2] = gen_rtx_REG (BImode, PR_REG (6));
3431 output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
3433 /* Branch. */
3434 fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [PR_REG (7)]);
3435 assemble_name_raw (asm_out_file, loop_lab);
3436 fputc ('\n', asm_out_file);
3438 return "";
3441 /* Called after register allocation to add any instructions needed for the
3442 prologue. Using a prologue insn is favored compared to putting all of the
3443 instructions in output_function_prologue(), since it allows the scheduler
3444 to intermix instructions with the saves of the caller saved registers. In
3445 some cases, it might be necessary to emit a barrier instruction as the last
3446 insn to prevent such scheduling.
3448 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3449 so that the debug info generation code can handle them properly.
3451 The register save area is laid out like so:
3452 cfa+16
3453 [ varargs spill area ]
3454 [ fr register spill area ]
3455 [ br register spill area ]
3456 [ ar register spill area ]
3457 [ pr register spill area ]
3458 [ gr register spill area ] */
3460 /* ??? Get inefficient code when the frame size is larger than can fit in an
3461 adds instruction. */
3463 void
3464 ia64_expand_prologue (void)
3466 rtx_insn *insn;
3467 rtx ar_pfs_save_reg, ar_unat_save_reg;
3468 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3469 rtx reg, alt_reg;
3471 ia64_compute_frame_size (get_frame_size ());
3472 last_scratch_gr_reg = 15;
3474 if (flag_stack_usage_info)
3475 current_function_static_stack_size = current_frame_info.total_size;
3477 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
3479 HOST_WIDE_INT size = current_frame_info.total_size;
3480 int bs_size = BACKING_STORE_SIZE (current_frame_info.n_input_regs
3481 + current_frame_info.n_local_regs);
3483 if (crtl->is_leaf && !cfun->calls_alloca)
3485 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
3486 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
3487 size - STACK_CHECK_PROTECT,
3488 bs_size);
3489 else if (size + bs_size > STACK_CHECK_PROTECT)
3490 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, 0, bs_size);
3492 else if (size + bs_size > 0)
3493 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, size, bs_size);
3496 if (dump_file)
3498 fprintf (dump_file, "ia64 frame related registers "
3499 "recorded in current_frame_info.r[]:\n");
3500 #define PRINTREG(a) if (current_frame_info.r[a]) \
3501 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3502 PRINTREG(reg_fp);
3503 PRINTREG(reg_save_b0);
3504 PRINTREG(reg_save_pr);
3505 PRINTREG(reg_save_ar_pfs);
3506 PRINTREG(reg_save_ar_unat);
3507 PRINTREG(reg_save_ar_lc);
3508 PRINTREG(reg_save_gp);
3509 #undef PRINTREG
3512 /* If there is no epilogue, then we don't need some prologue insns.
3513 We need to avoid emitting the dead prologue insns, because flow
3514 will complain about them. */
3515 if (optimize)
3517 edge e;
3518 edge_iterator ei;
3520 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
3521 if ((e->flags & EDGE_FAKE) == 0
3522 && (e->flags & EDGE_FALLTHRU) != 0)
3523 break;
3524 epilogue_p = (e != NULL);
3526 else
3527 epilogue_p = 1;
3529 /* Set the local, input, and output register names. We need to do this
3530 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3531 half. If we use in/loc/out register names, then we get assembler errors
3532 in crtn.S because there is no alloc insn or regstk directive in there. */
3533 if (! TARGET_REG_NAMES)
3535 int inputs = current_frame_info.n_input_regs;
3536 int locals = current_frame_info.n_local_regs;
3537 int outputs = current_frame_info.n_output_regs;
3539 for (i = 0; i < inputs; i++)
3540 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3541 for (i = 0; i < locals; i++)
3542 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3543 for (i = 0; i < outputs; i++)
3544 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3547 /* Set the frame pointer register name. The regnum is logically loc79,
3548 but of course we'll not have allocated that many locals. Rather than
3549 worrying about renumbering the existing rtxs, we adjust the name. */
3550 /* ??? This code means that we can never use one local register when
3551 there is a frame pointer. loc79 gets wasted in this case, as it is
3552 renamed to a register that will never be used. See also the try_locals
3553 code in find_gr_spill. */
3554 if (current_frame_info.r[reg_fp])
3556 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3557 reg_names[HARD_FRAME_POINTER_REGNUM]
3558 = reg_names[current_frame_info.r[reg_fp]];
3559 reg_names[current_frame_info.r[reg_fp]] = tmp;
3562 /* We don't need an alloc instruction if we've used no outputs or locals. */
3563 if (current_frame_info.n_local_regs == 0
3564 && current_frame_info.n_output_regs == 0
3565 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3566 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3568 /* If there is no alloc, but there are input registers used, then we
3569 need a .regstk directive. */
3570 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3571 ar_pfs_save_reg = NULL_RTX;
3573 else
3575 current_frame_info.need_regstk = 0;
3577 if (current_frame_info.r[reg_save_ar_pfs])
3579 regno = current_frame_info.r[reg_save_ar_pfs];
3580 reg_emitted (reg_save_ar_pfs);
3582 else
3583 regno = next_scratch_gr_reg ();
3584 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3586 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3587 GEN_INT (current_frame_info.n_input_regs),
3588 GEN_INT (current_frame_info.n_local_regs),
3589 GEN_INT (current_frame_info.n_output_regs),
3590 GEN_INT (current_frame_info.n_rotate_regs)));
3591 if (current_frame_info.r[reg_save_ar_pfs])
3593 RTX_FRAME_RELATED_P (insn) = 1;
3594 add_reg_note (insn, REG_CFA_REGISTER,
3595 gen_rtx_SET (ar_pfs_save_reg,
3596 gen_rtx_REG (DImode, AR_PFS_REGNUM)));
3600 /* Set up frame pointer, stack pointer, and spill iterators. */
3602 n_varargs = cfun->machine->n_varargs;
3603 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3604 stack_pointer_rtx, 0);
3606 if (frame_pointer_needed)
3608 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3609 RTX_FRAME_RELATED_P (insn) = 1;
3611 /* Force the unwind info to recognize this as defining a new CFA,
3612 rather than some temp register setup. */
3613 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3616 if (current_frame_info.total_size != 0)
3618 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3619 rtx offset;
3621 if (satisfies_constraint_I (frame_size_rtx))
3622 offset = frame_size_rtx;
3623 else
3625 regno = next_scratch_gr_reg ();
3626 offset = gen_rtx_REG (DImode, regno);
3627 emit_move_insn (offset, frame_size_rtx);
3630 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3631 stack_pointer_rtx, offset));
3633 if (! frame_pointer_needed)
3635 RTX_FRAME_RELATED_P (insn) = 1;
3636 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3637 gen_rtx_SET (stack_pointer_rtx,
3638 gen_rtx_PLUS (DImode,
3639 stack_pointer_rtx,
3640 frame_size_rtx)));
3643 /* ??? At this point we must generate a magic insn that appears to
3644 modify the stack pointer, the frame pointer, and all spill
3645 iterators. This would allow the most scheduling freedom. For
3646 now, just hard stop. */
3647 emit_insn (gen_blockage ());
3650 /* Must copy out ar.unat before doing any integer spills. */
3651 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3653 if (current_frame_info.r[reg_save_ar_unat])
3655 ar_unat_save_reg
3656 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3657 reg_emitted (reg_save_ar_unat);
3659 else
3661 alt_regno = next_scratch_gr_reg ();
3662 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3663 current_frame_info.gr_used_mask |= 1 << alt_regno;
3666 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3667 insn = emit_move_insn (ar_unat_save_reg, reg);
3668 if (current_frame_info.r[reg_save_ar_unat])
3670 RTX_FRAME_RELATED_P (insn) = 1;
3671 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3674 /* Even if we're not going to generate an epilogue, we still
3675 need to save the register so that EH works. */
3676 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3677 emit_insn (gen_prologue_use (ar_unat_save_reg));
3679 else
3680 ar_unat_save_reg = NULL_RTX;
3682 /* Spill all varargs registers. Do this before spilling any GR registers,
3683 since we want the UNAT bits for the GR registers to override the UNAT
3684 bits from varargs, which we don't care about. */
3686 cfa_off = -16;
3687 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3689 reg = gen_rtx_REG (DImode, regno);
3690 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3693 /* Locate the bottom of the register save area. */
3694 cfa_off = (current_frame_info.spill_cfa_off
3695 + current_frame_info.spill_size
3696 + current_frame_info.extra_spill_size);
3698 /* Save the predicate register block either in a register or in memory. */
3699 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3701 reg = gen_rtx_REG (DImode, PR_REG (0));
3702 if (current_frame_info.r[reg_save_pr] != 0)
3704 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3705 reg_emitted (reg_save_pr);
3706 insn = emit_move_insn (alt_reg, reg);
3708 /* ??? Denote pr spill/fill by a DImode move that modifies all
3709 64 hard registers. */
3710 RTX_FRAME_RELATED_P (insn) = 1;
3711 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3713 /* Even if we're not going to generate an epilogue, we still
3714 need to save the register so that EH works. */
3715 if (! epilogue_p)
3716 emit_insn (gen_prologue_use (alt_reg));
3718 else
3720 alt_regno = next_scratch_gr_reg ();
3721 alt_reg = gen_rtx_REG (DImode, alt_regno);
3722 insn = emit_move_insn (alt_reg, reg);
3723 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3724 cfa_off -= 8;
3728 /* Handle AR regs in numerical order. All of them get special handling. */
3729 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3730 && current_frame_info.r[reg_save_ar_unat] == 0)
3732 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3733 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3734 cfa_off -= 8;
3737 /* The alloc insn already copied ar.pfs into a general register. The
3738 only thing we have to do now is copy that register to a stack slot
3739 if we'd not allocated a local register for the job. */
3740 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3741 && current_frame_info.r[reg_save_ar_pfs] == 0)
3743 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3744 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3745 cfa_off -= 8;
3748 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3750 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3751 if (current_frame_info.r[reg_save_ar_lc] != 0)
3753 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3754 reg_emitted (reg_save_ar_lc);
3755 insn = emit_move_insn (alt_reg, reg);
3756 RTX_FRAME_RELATED_P (insn) = 1;
3757 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3759 /* Even if we're not going to generate an epilogue, we still
3760 need to save the register so that EH works. */
3761 if (! epilogue_p)
3762 emit_insn (gen_prologue_use (alt_reg));
3764 else
3766 alt_regno = next_scratch_gr_reg ();
3767 alt_reg = gen_rtx_REG (DImode, alt_regno);
3768 emit_move_insn (alt_reg, reg);
3769 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3770 cfa_off -= 8;
3774 /* Save the return pointer. */
3775 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3777 reg = gen_rtx_REG (DImode, BR_REG (0));
3778 if (current_frame_info.r[reg_save_b0] != 0)
3780 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3781 reg_emitted (reg_save_b0);
3782 insn = emit_move_insn (alt_reg, reg);
3783 RTX_FRAME_RELATED_P (insn) = 1;
3784 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (alt_reg, pc_rtx));
3786 /* Even if we're not going to generate an epilogue, we still
3787 need to save the register so that EH works. */
3788 if (! epilogue_p)
3789 emit_insn (gen_prologue_use (alt_reg));
3791 else
3793 alt_regno = next_scratch_gr_reg ();
3794 alt_reg = gen_rtx_REG (DImode, alt_regno);
3795 emit_move_insn (alt_reg, reg);
3796 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3797 cfa_off -= 8;
3801 if (current_frame_info.r[reg_save_gp])
3803 reg_emitted (reg_save_gp);
3804 insn = emit_move_insn (gen_rtx_REG (DImode,
3805 current_frame_info.r[reg_save_gp]),
3806 pic_offset_table_rtx);
3809 /* We should now be at the base of the gr/br/fr spill area. */
3810 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3811 + current_frame_info.spill_size));
3813 /* Spill all general registers. */
3814 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3815 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3817 reg = gen_rtx_REG (DImode, regno);
3818 do_spill (gen_gr_spill, reg, cfa_off, reg);
3819 cfa_off -= 8;
3822 /* Spill the rest of the BR registers. */
3823 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3824 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3826 alt_regno = next_scratch_gr_reg ();
3827 alt_reg = gen_rtx_REG (DImode, alt_regno);
3828 reg = gen_rtx_REG (DImode, regno);
3829 emit_move_insn (alt_reg, reg);
3830 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3831 cfa_off -= 8;
3834 /* Align the frame and spill all FR registers. */
3835 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3836 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3838 gcc_assert (!(cfa_off & 15));
3839 reg = gen_rtx_REG (XFmode, regno);
3840 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3841 cfa_off -= 16;
3844 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3846 finish_spill_pointers ();
3849 /* Output the textual info surrounding the prologue. */
3851 void
3852 ia64_start_function (FILE *file, const char *fnname,
3853 tree decl ATTRIBUTE_UNUSED)
3855 #if TARGET_ABI_OPEN_VMS
3856 vms_start_function (fnname);
3857 #endif
3859 fputs ("\t.proc ", file);
3860 assemble_name (file, fnname);
3861 fputc ('\n', file);
3862 ASM_OUTPUT_LABEL (file, fnname);
3865 /* Called after register allocation to add any instructions needed for the
3866 epilogue. Using an epilogue insn is favored compared to putting all of the
3867 instructions in output_function_prologue(), since it allows the scheduler
3868 to intermix instructions with the saves of the caller saved registers. In
3869 some cases, it might be necessary to emit a barrier instruction as the last
3870 insn to prevent such scheduling. */
3872 void
3873 ia64_expand_epilogue (int sibcall_p)
3875 rtx_insn *insn;
3876 rtx reg, alt_reg, ar_unat_save_reg;
3877 int regno, alt_regno, cfa_off;
3879 ia64_compute_frame_size (get_frame_size ());
3881 /* If there is a frame pointer, then we use it instead of the stack
3882 pointer, so that the stack pointer does not need to be valid when
3883 the epilogue starts. See EXIT_IGNORE_STACK. */
3884 if (frame_pointer_needed)
3885 setup_spill_pointers (current_frame_info.n_spilled,
3886 hard_frame_pointer_rtx, 0);
3887 else
3888 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3889 current_frame_info.total_size);
3891 if (current_frame_info.total_size != 0)
3893 /* ??? At this point we must generate a magic insn that appears to
3894 modify the spill iterators and the frame pointer. This would
3895 allow the most scheduling freedom. For now, just hard stop. */
3896 emit_insn (gen_blockage ());
3899 /* Locate the bottom of the register save area. */
3900 cfa_off = (current_frame_info.spill_cfa_off
3901 + current_frame_info.spill_size
3902 + current_frame_info.extra_spill_size);
3904 /* Restore the predicate registers. */
3905 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3907 if (current_frame_info.r[reg_save_pr] != 0)
3909 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3910 reg_emitted (reg_save_pr);
3912 else
3914 alt_regno = next_scratch_gr_reg ();
3915 alt_reg = gen_rtx_REG (DImode, alt_regno);
3916 do_restore (gen_movdi_x, alt_reg, cfa_off);
3917 cfa_off -= 8;
3919 reg = gen_rtx_REG (DImode, PR_REG (0));
3920 emit_move_insn (reg, alt_reg);
3923 /* Restore the application registers. */
3925 /* Load the saved unat from the stack, but do not restore it until
3926 after the GRs have been restored. */
3927 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3929 if (current_frame_info.r[reg_save_ar_unat] != 0)
3931 ar_unat_save_reg
3932 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3933 reg_emitted (reg_save_ar_unat);
3935 else
3937 alt_regno = next_scratch_gr_reg ();
3938 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3939 current_frame_info.gr_used_mask |= 1 << alt_regno;
3940 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3941 cfa_off -= 8;
3944 else
3945 ar_unat_save_reg = NULL_RTX;
3947 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3949 reg_emitted (reg_save_ar_pfs);
3950 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3951 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3952 emit_move_insn (reg, alt_reg);
3954 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3956 alt_regno = next_scratch_gr_reg ();
3957 alt_reg = gen_rtx_REG (DImode, alt_regno);
3958 do_restore (gen_movdi_x, alt_reg, cfa_off);
3959 cfa_off -= 8;
3960 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3961 emit_move_insn (reg, alt_reg);
3964 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3966 if (current_frame_info.r[reg_save_ar_lc] != 0)
3968 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3969 reg_emitted (reg_save_ar_lc);
3971 else
3973 alt_regno = next_scratch_gr_reg ();
3974 alt_reg = gen_rtx_REG (DImode, alt_regno);
3975 do_restore (gen_movdi_x, alt_reg, cfa_off);
3976 cfa_off -= 8;
3978 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3979 emit_move_insn (reg, alt_reg);
3982 /* Restore the return pointer. */
3983 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3985 if (current_frame_info.r[reg_save_b0] != 0)
3987 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3988 reg_emitted (reg_save_b0);
3990 else
3992 alt_regno = next_scratch_gr_reg ();
3993 alt_reg = gen_rtx_REG (DImode, alt_regno);
3994 do_restore (gen_movdi_x, alt_reg, cfa_off);
3995 cfa_off -= 8;
3997 reg = gen_rtx_REG (DImode, BR_REG (0));
3998 emit_move_insn (reg, alt_reg);
4001 /* We should now be at the base of the gr/br/fr spill area. */
4002 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
4003 + current_frame_info.spill_size));
4005 /* The GP may be stored on the stack in the prologue, but it's
4006 never restored in the epilogue. Skip the stack slot. */
4007 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
4008 cfa_off -= 8;
4010 /* Restore all general registers. */
4011 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
4012 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4014 reg = gen_rtx_REG (DImode, regno);
4015 do_restore (gen_gr_restore, reg, cfa_off);
4016 cfa_off -= 8;
4019 /* Restore the branch registers. */
4020 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
4021 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4023 alt_regno = next_scratch_gr_reg ();
4024 alt_reg = gen_rtx_REG (DImode, alt_regno);
4025 do_restore (gen_movdi_x, alt_reg, cfa_off);
4026 cfa_off -= 8;
4027 reg = gen_rtx_REG (DImode, regno);
4028 emit_move_insn (reg, alt_reg);
4031 /* Restore floating point registers. */
4032 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
4033 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4035 gcc_assert (!(cfa_off & 15));
4036 reg = gen_rtx_REG (XFmode, regno);
4037 do_restore (gen_fr_restore_x, reg, cfa_off);
4038 cfa_off -= 16;
4041 /* Restore ar.unat for real. */
4042 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
4044 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
4045 emit_move_insn (reg, ar_unat_save_reg);
4048 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
4050 finish_spill_pointers ();
4052 if (current_frame_info.total_size
4053 || cfun->machine->ia64_eh_epilogue_sp
4054 || frame_pointer_needed)
4056 /* ??? At this point we must generate a magic insn that appears to
4057 modify the spill iterators, the stack pointer, and the frame
4058 pointer. This would allow the most scheduling freedom. For now,
4059 just hard stop. */
4060 emit_insn (gen_blockage ());
4063 if (cfun->machine->ia64_eh_epilogue_sp)
4064 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
4065 else if (frame_pointer_needed)
4067 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
4068 RTX_FRAME_RELATED_P (insn) = 1;
4069 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
4071 else if (current_frame_info.total_size)
4073 rtx offset, frame_size_rtx;
4075 frame_size_rtx = GEN_INT (current_frame_info.total_size);
4076 if (satisfies_constraint_I (frame_size_rtx))
4077 offset = frame_size_rtx;
4078 else
4080 regno = next_scratch_gr_reg ();
4081 offset = gen_rtx_REG (DImode, regno);
4082 emit_move_insn (offset, frame_size_rtx);
4085 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
4086 offset));
4088 RTX_FRAME_RELATED_P (insn) = 1;
4089 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4090 gen_rtx_SET (stack_pointer_rtx,
4091 gen_rtx_PLUS (DImode,
4092 stack_pointer_rtx,
4093 frame_size_rtx)));
4096 if (cfun->machine->ia64_eh_epilogue_bsp)
4097 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
4099 if (! sibcall_p)
4100 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
4101 else
4103 int fp = GR_REG (2);
4104 /* We need a throw away register here, r0 and r1 are reserved,
4105 so r2 is the first available call clobbered register. If
4106 there was a frame_pointer register, we may have swapped the
4107 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
4108 sure we're using the string "r2" when emitting the register
4109 name for the assembler. */
4110 if (current_frame_info.r[reg_fp]
4111 && current_frame_info.r[reg_fp] == GR_REG (2))
4112 fp = HARD_FRAME_POINTER_REGNUM;
4114 /* We must emit an alloc to force the input registers to become output
4115 registers. Otherwise, if the callee tries to pass its parameters
4116 through to another call without an intervening alloc, then these
4117 values get lost. */
4118 /* ??? We don't need to preserve all input registers. We only need to
4119 preserve those input registers used as arguments to the sibling call.
4120 It is unclear how to compute that number here. */
4121 if (current_frame_info.n_input_regs != 0)
4123 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
4125 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
4126 const0_rtx, const0_rtx,
4127 n_inputs, const0_rtx));
4128 RTX_FRAME_RELATED_P (insn) = 1;
4130 /* ??? We need to mark the alloc as frame-related so that it gets
4131 passed into ia64_asm_unwind_emit for ia64-specific unwinding.
4132 But there's nothing dwarf2 related to be done wrt the register
4133 windows. If we do nothing, dwarf2out will abort on the UNSPEC;
4134 the empty parallel means dwarf2out will not see anything. */
4135 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4136 gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
4141 /* Return 1 if br.ret can do all the work required to return from a
4142 function. */
4144 int
4145 ia64_direct_return (void)
4147 if (reload_completed && ! frame_pointer_needed)
4149 ia64_compute_frame_size (get_frame_size ());
4151 return (current_frame_info.total_size == 0
4152 && current_frame_info.n_spilled == 0
4153 && current_frame_info.r[reg_save_b0] == 0
4154 && current_frame_info.r[reg_save_pr] == 0
4155 && current_frame_info.r[reg_save_ar_pfs] == 0
4156 && current_frame_info.r[reg_save_ar_unat] == 0
4157 && current_frame_info.r[reg_save_ar_lc] == 0);
4159 return 0;
4162 /* Return the magic cookie that we use to hold the return address
4163 during early compilation. */
4165 rtx
4166 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
4168 if (count != 0)
4169 return NULL;
4170 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
4173 /* Split this value after reload, now that we know where the return
4174 address is saved. */
4176 void
4177 ia64_split_return_addr_rtx (rtx dest)
4179 rtx src;
4181 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
4183 if (current_frame_info.r[reg_save_b0] != 0)
4185 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
4186 reg_emitted (reg_save_b0);
4188 else
4190 HOST_WIDE_INT off;
4191 unsigned int regno;
4192 rtx off_r;
4194 /* Compute offset from CFA for BR0. */
4195 /* ??? Must be kept in sync with ia64_expand_prologue. */
4196 off = (current_frame_info.spill_cfa_off
4197 + current_frame_info.spill_size);
4198 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
4199 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4200 off -= 8;
4202 /* Convert CFA offset to a register based offset. */
4203 if (frame_pointer_needed)
4204 src = hard_frame_pointer_rtx;
4205 else
4207 src = stack_pointer_rtx;
4208 off += current_frame_info.total_size;
4211 /* Load address into scratch register. */
4212 off_r = GEN_INT (off);
4213 if (satisfies_constraint_I (off_r))
4214 emit_insn (gen_adddi3 (dest, src, off_r));
4215 else
4217 emit_move_insn (dest, off_r);
4218 emit_insn (gen_adddi3 (dest, src, dest));
4221 src = gen_rtx_MEM (Pmode, dest);
4224 else
4225 src = gen_rtx_REG (DImode, BR_REG (0));
4227 emit_move_insn (dest, src);
4230 int
4231 ia64_hard_regno_rename_ok (int from, int to)
4233 /* Don't clobber any of the registers we reserved for the prologue. */
4234 unsigned int r;
4236 for (r = reg_fp; r <= reg_save_ar_lc; r++)
4237 if (to == current_frame_info.r[r]
4238 || from == current_frame_info.r[r]
4239 || to == emitted_frame_related_regs[r]
4240 || from == emitted_frame_related_regs[r])
4241 return 0;
4243 /* Don't use output registers outside the register frame. */
4244 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
4245 return 0;
4247 /* Retain even/oddness on predicate register pairs. */
4248 if (PR_REGNO_P (from) && PR_REGNO_P (to))
4249 return (from & 1) == (to & 1);
4251 return 1;
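/* Worked example with illustrative values: if current_frame_info.n_output_regs
   is 2, OUT_REG (2) is the first regno past the frame's outputs, so renaming
   from OUT_REG (0) to OUT_REG (2) is rejected, while OUT_REG (0) <-> OUT_REG (1)
   is allowed provided neither register was reserved by the prologue; likewise a
   rename that pairs an even predicate register with an odd one is rejected so
   that the even/odd pairing set up by compares is preserved.  */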
4254 /* Target hook for assembling integer objects. Handle word-sized
4255 aligned objects and detect the cases when @fptr is needed. */
4257 static bool
4258 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
4260 if (size == POINTER_SIZE / BITS_PER_UNIT
4261 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
4262 && GET_CODE (x) == SYMBOL_REF
4263 && SYMBOL_REF_FUNCTION_P (x))
4265 static const char * const directive[2][2] = {
4266 /* 64-bit pointer */ /* 32-bit pointer */
4267 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
4268 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4270 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4271 output_addr_const (asm_out_file, x);
4272 fputs (")\n", asm_out_file);
4273 return true;
4275 return default_assemble_integer (x, size, aligned_p);
4278 /* Emit the function prologue. */
4280 static void
4281 ia64_output_function_prologue (FILE *file)
4283 int mask, grsave, grsave_prev;
4285 if (current_frame_info.need_regstk)
4286 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4287 current_frame_info.n_input_regs,
4288 current_frame_info.n_local_regs,
4289 current_frame_info.n_output_regs,
4290 current_frame_info.n_rotate_regs);
4292 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4293 return;
4295 /* Emit the .prologue directive. */
4297 mask = 0;
4298 grsave = grsave_prev = 0;
4299 if (current_frame_info.r[reg_save_b0] != 0)
4301 mask |= 8;
4302 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4304 if (current_frame_info.r[reg_save_ar_pfs] != 0
4305 && (grsave_prev == 0
4306 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4308 mask |= 4;
4309 if (grsave_prev == 0)
4310 grsave = current_frame_info.r[reg_save_ar_pfs];
4311 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4313 if (current_frame_info.r[reg_fp] != 0
4314 && (grsave_prev == 0
4315 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4317 mask |= 2;
4318 if (grsave_prev == 0)
4319 grsave = HARD_FRAME_POINTER_REGNUM;
4320 grsave_prev = current_frame_info.r[reg_fp];
4322 if (current_frame_info.r[reg_save_pr] != 0
4323 && (grsave_prev == 0
4324 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4326 mask |= 1;
4327 if (grsave_prev == 0)
4328 grsave = current_frame_info.r[reg_save_pr];
4331 if (mask && TARGET_GNU_AS)
4332 fprintf (file, "\t.prologue %d, %d\n", mask,
4333 ia64_dbx_register_number (grsave));
4334 else
4335 fputs ("\t.prologue\n", file);
4337 /* Emit a .spill directive, if necessary, to relocate the base of
4338 the register spill area. */
4339 if (current_frame_info.spill_cfa_off != -16)
4340 fprintf (file, "\t.spill %ld\n",
4341 (long) (current_frame_info.spill_cfa_off
4342 + current_frame_info.spill_size));
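/* For instance (assuming the GNU assembler is in use), a frame where only b0
   is saved in a general register produces mask == 8 with grsave set to that
   register, so the directive emitted is ".prologue 8, N" where N is
   ia64_dbx_register_number (grsave); when mask is zero, or with the Intel
   assembler, the bare ".prologue" form is emitted instead.  */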
4345 /* Emit the .body directive at the scheduled end of the prologue. */
4347 static void
4348 ia64_output_function_end_prologue (FILE *file)
4350 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4351 return;
4353 fputs ("\t.body\n", file);
4356 /* Emit the function epilogue. */
4358 static void
4359 ia64_output_function_epilogue (FILE *)
4361 int i;
4363 if (current_frame_info.r[reg_fp])
4365 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4366 reg_names[HARD_FRAME_POINTER_REGNUM]
4367 = reg_names[current_frame_info.r[reg_fp]];
4368 reg_names[current_frame_info.r[reg_fp]] = tmp;
4369 reg_emitted (reg_fp);
4371 if (! TARGET_REG_NAMES)
4373 for (i = 0; i < current_frame_info.n_input_regs; i++)
4374 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4375 for (i = 0; i < current_frame_info.n_local_regs; i++)
4376 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4377 for (i = 0; i < current_frame_info.n_output_regs; i++)
4378 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4381 current_frame_info.initialized = 0;
4384 int
4385 ia64_dbx_register_number (int regno)
4387 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4388 from its home at loc79 to something inside the register frame. We
4389 must perform the same renumbering here for the debug info. */
4390 if (current_frame_info.r[reg_fp])
4392 if (regno == HARD_FRAME_POINTER_REGNUM)
4393 regno = current_frame_info.r[reg_fp];
4394 else if (regno == current_frame_info.r[reg_fp])
4395 regno = HARD_FRAME_POINTER_REGNUM;
4398 if (IN_REGNO_P (regno))
4399 return 32 + regno - IN_REG (0);
4400 else if (LOC_REGNO_P (regno))
4401 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4402 else if (OUT_REGNO_P (regno))
4403 return (32 + current_frame_info.n_input_regs
4404 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4405 else
4406 return regno;
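/* Worked example for an illustrative frame with 2 input and 3 local
   registers: in0 maps to debug register 32 and in1 to 33, loc0 maps to
   32 + 2 == 34, out0 maps to 32 + 2 + 3 == 37, and any register outside
   the stacked frame is returned unchanged.  */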
4409 /* Implement TARGET_TRAMPOLINE_INIT.
4411 The trampoline should set the static chain pointer to value placed
4412 into the trampoline and should branch to the specified routine.
4413 To make the normal indirect-subroutine calling convention work,
4414 the trampoline must look like a function descriptor; the first
4415 word being the target address and the second being the target's
4416 global pointer.
4418 We abuse the concept of a global pointer by arranging for it
4419 to point to the data we need to load. The complete trampoline
4420 has the following form:
4422 +-------------------+ \
4423 TRAMP: | __ia64_trampoline | |
4424 +-------------------+ > fake function descriptor
4425 | TRAMP+16 | |
4426 +-------------------+ /
4427 | target descriptor |
4428 +-------------------+
4429 | static link |
4430 +-------------------+
4433 static void
4434 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4436 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4437 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4439 /* The Intel assembler requires that the global __ia64_trampoline symbol
4440 be declared explicitly */
4441 if (!TARGET_GNU_AS)
4443 static bool declared_ia64_trampoline = false;
4445 if (!declared_ia64_trampoline)
4447 declared_ia64_trampoline = true;
4448 (*targetm.asm_out.globalize_label) (asm_out_file,
4449 "__ia64_trampoline");
4453 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4454 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4455 fnaddr = convert_memory_address (Pmode, fnaddr);
4456 static_chain = convert_memory_address (Pmode, static_chain);
4458 /* Load up our iterator. */
4459 addr_reg = copy_to_reg (addr);
4460 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4462 /* The first two words are the fake descriptor:
4463 __ia64_trampoline, ADDR+16. */
4464 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4465 if (TARGET_ABI_OPEN_VMS)
4467 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4468 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4469 relocation against function symbols to make it identical to the
4470 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4471 strict ELF and dereference to get the bare code address. */
4472 rtx reg = gen_reg_rtx (Pmode);
4473 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4474 emit_move_insn (reg, tramp);
4475 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4476 tramp = reg;
4478 emit_move_insn (m_tramp, tramp);
4479 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4480 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4482 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
4483 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4484 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4486 /* The third word is the target descriptor. */
4487 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4488 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4489 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4491 /* The fourth word is the static chain. */
4492 emit_move_insn (m_tramp, static_chain);
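/* To illustrate the result of the stores above: once initialized, the four
   8-byte words of the trampoline hold, in order, the address of
   __ia64_trampoline, the address TRAMP+16, the address of the target
   function's descriptor (FNADDR), and the static chain value, matching the
   diagram before this function.  */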
4495 /* Do any needed setup for a variadic function. CUM has not been updated
4496 for the last named argument which has type TYPE and mode MODE.
4498 We generate the actual spill instructions during prologue generation. */
4500 static void
4501 ia64_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
4502 tree type, int * pretend_size,
4503 int second_time ATTRIBUTE_UNUSED)
4505 CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);
4507 /* Skip the current argument. */
4508 ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);
4510 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4512 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4513 *pretend_size = n * UNITS_PER_WORD;
4514 cfun->machine->n_varargs = n;
4518 /* Check whether TYPE is a homogeneous floating point aggregate. If
4519 it is, return the mode of the floating point type that appears
4520 in all leaves. If it is not, return VOIDmode.
4522 An aggregate is a homogeneous floating point aggregate if all
4523 fields/elements in it have the same floating point type (e.g.,
4524 SFmode). 128-bit quad-precision floats are excluded.
4526 Variable sized aggregates should never arrive here, since we should
4527 have already decided to pass them by reference. Top-level zero-sized
4528 aggregates are excluded because our parallels crash the middle-end. */
4530 static machine_mode
4531 hfa_element_mode (const_tree type, bool nested)
4533 machine_mode element_mode = VOIDmode;
4534 machine_mode mode;
4535 enum tree_code code = TREE_CODE (type);
4536 int know_element_mode = 0;
4537 tree t;
4539 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4540 return VOIDmode;
4542 switch (code)
4544 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4545 case BOOLEAN_TYPE: case POINTER_TYPE:
4546 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4547 case LANG_TYPE: case FUNCTION_TYPE:
4548 return VOIDmode;
4550 /* Fortran complex types are supposed to be HFAs, so we need to handle
4551 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4552 types though. */
4553 case COMPLEX_TYPE:
4554 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4555 && TYPE_MODE (type) != TCmode)
4556 return GET_MODE_INNER (TYPE_MODE (type));
4557 else
4558 return VOIDmode;
4560 case REAL_TYPE:
4561 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4562 mode if this is contained within an aggregate. */
4563 if (nested && TYPE_MODE (type) != TFmode)
4564 return TYPE_MODE (type);
4565 else
4566 return VOIDmode;
4568 case ARRAY_TYPE:
4569 return hfa_element_mode (TREE_TYPE (type), 1);
4571 case RECORD_TYPE:
4572 case UNION_TYPE:
4573 case QUAL_UNION_TYPE:
4574 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4576 if (TREE_CODE (t) != FIELD_DECL)
4577 continue;
4579 mode = hfa_element_mode (TREE_TYPE (t), 1);
4580 if (know_element_mode)
4582 if (mode != element_mode)
4583 return VOIDmode;
4585 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4586 return VOIDmode;
4587 else
4589 know_element_mode = 1;
4590 element_mode = mode;
4593 return element_mode;
4595 default:
4596 /* If we reach here, we probably have some front-end specific type
4597 that the backend doesn't know about. This can happen via the
4598 aggregate_value_p call in init_function_start. All we can do is
4599 ignore unknown tree types. */
4600 return VOIDmode;
4603 return VOIDmode;
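/* Illustrative classifications: struct { double d[4]; } and _Complex double
   are HFAs with element mode DFmode; struct { float f; double d; } mixes
   element modes and yields VOIDmode, as does any aggregate containing a
   128-bit quad-precision (TFmode) field.  */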
4606 /* Return the number of words required to hold a quantity of TYPE and MODE
4607 when passed as an argument. */
4608 static int
4609 ia64_function_arg_words (const_tree type, machine_mode mode)
4611 int words;
4613 if (mode == BLKmode)
4614 words = int_size_in_bytes (type);
4615 else
4616 words = GET_MODE_SIZE (mode);
4618 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
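/* For example, assuming UNITS_PER_WORD == 8: a DImode scalar needs
   (8 + 7) / 8 == 1 argument word, while a 20-byte BLKmode aggregate needs
   (20 + 7) / 8 == 3 words.  */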
4621 /* Return the number of registers that should be skipped so the current
4622 argument (described by TYPE and WORDS) will be properly aligned.
4624 Integer and float arguments larger than 8 bytes start at the next
4625 even boundary. Aggregates larger than 8 bytes start at the next
4626 even boundary if the aggregate has 16 byte alignment. Note that
4627 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4628 but are still to be aligned in registers.
4630 ??? The ABI does not specify how to handle aggregates with
4631 alignment from 9 to 15 bytes, or greater than 16. We handle them
4632 all as if they had 16 byte alignment. Such aggregates can occur
4633 only if gcc extensions are used. */
4634 static int
4635 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4636 const_tree type, int words)
4638 /* No registers are skipped on VMS. */
4639 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4640 return 0;
4642 if (type
4643 && TREE_CODE (type) != INTEGER_TYPE
4644 && TREE_CODE (type) != REAL_TYPE)
4645 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4646 else
4647 return words > 1;
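/* Worked example (non-VMS): when cum->words is odd, a 16-byte-aligned
   aggregate or a scalar wider than one word returns 1, so the argument skips
   a slot and starts on an even word boundary; when cum->words is even, or on
   VMS, the offset is always 0.  */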
4650 /* Return rtx for register where argument is passed, or zero if it is passed
4651 on the stack. */
4652 /* ??? 128-bit quad-precision floats are always passed in general
4653 registers. */
4655 static rtx
4656 ia64_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
4657 const_tree type, bool named, bool incoming)
4659 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4661 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4662 int words = ia64_function_arg_words (type, mode);
4663 int offset = ia64_function_arg_offset (cum, type, words);
4664 machine_mode hfa_mode = VOIDmode;
4666 /* For OPEN VMS, emit the instruction setting up the argument register here,
4667 when we know this will be together with the other arguments setup related
4668 insns. This is not the conceptually best place to do this, but this is
4669 the easiest as we have convenient access to cumulative args info. */
4671 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4672 && named == 1)
4674 unsigned HOST_WIDE_INT regval = cum->words;
4675 int i;
4677 for (i = 0; i < 8; i++)
4678 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4680 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4681 GEN_INT (regval));
4684 /* If all argument slots are used, then it must go on the stack. */
4685 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4686 return 0;
4688 /* On OpenVMS argument is either in Rn or Fn. */
4689 if (TARGET_ABI_OPEN_VMS)
4691 if (FLOAT_MODE_P (mode))
4692 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4693 else
4694 return gen_rtx_REG (mode, basereg + cum->words);
4697 /* Check for and handle homogeneous FP aggregates. */
4698 if (type)
4699 hfa_mode = hfa_element_mode (type, 0);
4701 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4702 and unprototyped hfas are passed specially. */
4703 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4705 rtx loc[16];
4706 int i = 0;
4707 int fp_regs = cum->fp_regs;
4708 int int_regs = cum->words + offset;
4709 int hfa_size = GET_MODE_SIZE (hfa_mode);
4710 int byte_size;
4711 int args_byte_size;
4713 /* If prototyped, pass it in FR regs then GR regs.
4714 If not prototyped, pass it in both FR and GR regs.
4716 If this is an SFmode aggregate, then it is possible to run out of
4717 FR regs while GR regs are still left. In that case, we pass the
4718 remaining part in the GR regs. */
4720 /* Fill the FP regs. We do this always. We stop if we reach the end
4721 of the argument, the last FP register, or the last argument slot. */
4723 byte_size = ((mode == BLKmode)
4724 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4725 args_byte_size = int_regs * UNITS_PER_WORD;
4726 offset = 0;
4727 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4728 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4730 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4731 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4732 + fp_regs)),
4733 GEN_INT (offset));
4734 offset += hfa_size;
4735 args_byte_size += hfa_size;
4736 fp_regs++;
4739 /* If no prototype, then the whole thing must go in GR regs. */
4740 if (! cum->prototype)
4741 offset = 0;
4742 /* If this is an SFmode aggregate, then we might have some left over
4743 that needs to go in GR regs. */
4744 else if (byte_size != offset)
4745 int_regs += offset / UNITS_PER_WORD;
4747 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4749 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4751 machine_mode gr_mode = DImode;
4752 unsigned int gr_size;
4754 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4755 then this goes in a GR reg left adjusted/little endian, right
4756 adjusted/big endian. */
4757 /* ??? Currently this is handled wrong, because 4-byte hunks are
4758 always right adjusted/little endian. */
4759 if (offset & 0x4)
4760 gr_mode = SImode;
4761 /* If we have an even 4 byte hunk because the aggregate is a
4762 multiple of 4 bytes in size, then this goes in a GR reg right
4763 adjusted/little endian. */
4764 else if (byte_size - offset == 4)
4765 gr_mode = SImode;
4767 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4768 gen_rtx_REG (gr_mode, (basereg
4769 + int_regs)),
4770 GEN_INT (offset));
4772 gr_size = GET_MODE_SIZE (gr_mode);
4773 offset += gr_size;
4774 if (gr_size == UNITS_PER_WORD
4775 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4776 int_regs++;
4777 else if (gr_size > UNITS_PER_WORD)
4778 int_regs += gr_size / UNITS_PER_WORD;
4780 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4783 /* Integral and aggregates go in general registers. If we have run out of
4784 FR registers, then FP values must also go in general registers. This can
4785 happen when we have an SFmode HFA. */
4786 else if (mode == TFmode || mode == TCmode
4787 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4789 int byte_size = ((mode == BLKmode)
4790 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4791 if (BYTES_BIG_ENDIAN
4792 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4793 && byte_size < UNITS_PER_WORD
4794 && byte_size > 0)
4796 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4797 gen_rtx_REG (DImode,
4798 (basereg + cum->words
4799 + offset)),
4800 const0_rtx);
4801 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4803 else
4804 return gen_rtx_REG (mode, basereg + cum->words + offset);
4808 /* If there is a prototype, then FP values go in a FR register when
4809 named, and in a GR register when unnamed. */
4810 else if (cum->prototype)
4812 if (named)
4813 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4814 /* In big-endian mode, an anonymous SFmode value must be represented
4815 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4816 the value into the high half of the general register. */
4817 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4818 return gen_rtx_PARALLEL (mode,
4819 gen_rtvec (1,
4820 gen_rtx_EXPR_LIST (VOIDmode,
4821 gen_rtx_REG (DImode, basereg + cum->words + offset),
4822 const0_rtx)));
4823 else
4824 return gen_rtx_REG (mode, basereg + cum->words + offset);
4826 /* If there is no prototype, then FP values go in both FR and GR
4827 registers. */
4828 else
4830 /* See comment above. */
4831 machine_mode inner_mode =
4832 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4834 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4835 gen_rtx_REG (mode, (FR_ARG_FIRST
4836 + cum->fp_regs)),
4837 const0_rtx);
4838 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4839 gen_rtx_REG (inner_mode,
4840 (basereg + cum->words
4841 + offset)),
4842 const0_rtx);
4844 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
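/* A concrete illustration of the HFA path above, assuming no FP argument
   registers have been used yet: a named, prototyped argument of type
   struct { double d[4]; } starting at slot 0 comes back as a PARALLEL of
   four DFmode registers FR_ARG_FIRST .. FR_ARG_FIRST+3 at byte offsets
   0, 8, 16 and 24, with no GR portion since the whole value fits.  */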
4848 /* Implement TARGET_FUNCTION_ARG target hook. */
4850 static rtx
4851 ia64_function_arg (cumulative_args_t cum, machine_mode mode,
4852 const_tree type, bool named)
4854 return ia64_function_arg_1 (cum, mode, type, named, false);
4857 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4859 static rtx
4860 ia64_function_incoming_arg (cumulative_args_t cum,
4861 machine_mode mode,
4862 const_tree type, bool named)
4864 return ia64_function_arg_1 (cum, mode, type, named, true);
4867 /* Return number of bytes, at the beginning of the argument, that must be
4868 put in registers. 0 if the argument is entirely in registers or entirely
4869 in memory. */
4871 static int
4872 ia64_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
4873 tree type, bool named ATTRIBUTE_UNUSED)
4875 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4877 int words = ia64_function_arg_words (type, mode);
4878 int offset = ia64_function_arg_offset (cum, type, words);
4880 /* If all argument slots are used, then it must go on the stack. */
4881 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4882 return 0;
4884 /* It doesn't matter whether the argument goes in FR or GR regs. If
4885 it fits within the 8 argument slots, then it goes entirely in
4886 registers. If it extends past the last argument slot, then the rest
4887 goes on the stack. */
4889 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4890 return 0;
4892 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
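/* For example, with MAX_ARGUMENT_SLOTS == 8 and UNITS_PER_WORD == 8: a
   4-word aggregate arriving at cum->words == 6 with no alignment padding
   gets 2 words in registers and 2 on the stack, so this returns 16; had
   cum->words already been 8, it would return 0.  */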
4895 /* Return ivms_arg_type based on machine_mode. */
4897 static enum ivms_arg_type
4898 ia64_arg_type (machine_mode mode)
4900 switch (mode)
4902 case E_SFmode:
4903 return FS;
4904 case E_DFmode:
4905 return FT;
4906 default:
4907 return I64;
4911 /* Update CUM to point after this argument. This is patterned after
4912 ia64_function_arg. */
4914 static void
4915 ia64_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
4916 const_tree type, bool named)
4918 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4919 int words = ia64_function_arg_words (type, mode);
4920 int offset = ia64_function_arg_offset (cum, type, words);
4921 machine_mode hfa_mode = VOIDmode;
4923 /* If all arg slots are already full, then there is nothing to do. */
4924 if (cum->words >= MAX_ARGUMENT_SLOTS)
4926 cum->words += words + offset;
4927 return;
4930 cum->atypes[cum->words] = ia64_arg_type (mode);
4931 cum->words += words + offset;
4933 /* On OpenVMS argument is either in Rn or Fn. */
4934 if (TARGET_ABI_OPEN_VMS)
4936 cum->int_regs = cum->words;
4937 cum->fp_regs = cum->words;
4938 return;
4941 /* Check for and handle homogeneous FP aggregates. */
4942 if (type)
4943 hfa_mode = hfa_element_mode (type, 0);
4945 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4946 and unprototyped hfas are passed specially. */
4947 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4949 int fp_regs = cum->fp_regs;
4950 /* This is the original value of cum->words + offset. */
4951 int int_regs = cum->words - words;
4952 int hfa_size = GET_MODE_SIZE (hfa_mode);
4953 int byte_size;
4954 int args_byte_size;
4956 /* If prototyped, pass it in FR regs then GR regs.
4957 If not prototyped, pass it in both FR and GR regs.
4959 If this is an SFmode aggregate, then it is possible to run out of
4960 FR regs while GR regs are still left. In that case, we pass the
4961 remaining part in the GR regs. */
4963 /* Fill the FP regs. We do this always. We stop if we reach the end
4964 of the argument, the last FP register, or the last argument slot. */
4966 byte_size = ((mode == BLKmode)
4967 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4968 args_byte_size = int_regs * UNITS_PER_WORD;
4969 offset = 0;
4970 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4971 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4973 offset += hfa_size;
4974 args_byte_size += hfa_size;
4975 fp_regs++;
4978 cum->fp_regs = fp_regs;
4981 /* Integral and aggregates go in general registers. So do TFmode FP values.
4982 If we have run out of FR registers, then other FP values must also go in
4983 general registers. This can happen when we have an SFmode HFA. */
4984 else if (mode == TFmode || mode == TCmode
4985 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4986 cum->int_regs = cum->words;
4988 /* If there is a prototype, then FP values go in a FR register when
4989 named, and in a GR register when unnamed. */
4990 else if (cum->prototype)
4992 if (! named)
4993 cum->int_regs = cum->words;
4994 else
4995 /* ??? Complex types should not reach here. */
4996 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4998 /* If there is no prototype, then FP values go in both FR and GR
4999 registers. */
5000 else
5002 /* ??? Complex types should not reach here. */
5003 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
5004 cum->int_regs = cum->words;
5008 /* Arguments with alignment larger than 8 bytes start at the next even
5009 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
5010 even though their normal alignment is 8 bytes. See ia64_function_arg. */
5012 static unsigned int
5013 ia64_function_arg_boundary (machine_mode mode, const_tree type)
5015 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
5016 return PARM_BOUNDARY * 2;
5018 if (type)
5020 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
5021 return PARM_BOUNDARY * 2;
5022 else
5023 return PARM_BOUNDARY;
5026 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
5027 return PARM_BOUNDARY * 2;
5028 else
5029 return PARM_BOUNDARY;
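/* Assuming PARM_BOUNDARY is 64 on this target: a DImode argument keeps the
   64-bit boundary, while a 16-byte-aligned aggregate, a TImode value, or
   (on ILP32 HP-UX) a TFmode value gets the doubled 128-bit boundary.  */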
5032 /* True if it is OK to do sibling call optimization for the specified
5033 call expression EXP. DECL will be the called function, or NULL if
5034 this is an indirect call. */
5035 static bool
5036 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5038 /* We can't perform a sibcall if the current function has the syscall_linkage
5039 attribute. */
5040 if (lookup_attribute ("syscall_linkage",
5041 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
5042 return false;
5044 /* We must always return with our current GP. This means we can
5045 only sibcall to functions defined in the current module unless
5046 TARGET_CONST_GP is set to true. */
5047 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
5051 /* Implement va_arg. */
5053 static tree
5054 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5055 gimple_seq *post_p)
5057 /* Variable sized types are passed by reference. */
5058 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5060 tree ptrtype = build_pointer_type (type);
5061 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
5062 return build_va_arg_indirect_ref (addr);
5065 /* Aggregate arguments with alignment larger than 8 bytes start at
5066 the next even boundary. Integer and floating point arguments
5067 do so if they are larger than 8 bytes, whether or not they are
5068 also aligned larger than 8 bytes. */
5069 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
5070 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
5072 tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
5073 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5074 build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
5075 gimplify_assign (unshare_expr (valist), t, pre_p);
5078 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
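/* The realignment above computes valist = (valist + 15) & -16 when
   UNITS_PER_WORD is 8; e.g. a va_list pointer of 0x1028 is bumped to 0x1030
   before such an argument is fetched.  */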
5081 /* Return 1 if the function return value is returned in memory. Return 0 if it is
5082 in a register. */
5084 static bool
5085 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
5087 machine_mode mode;
5088 machine_mode hfa_mode;
5089 HOST_WIDE_INT byte_size;
5091 mode = TYPE_MODE (valtype);
5092 byte_size = GET_MODE_SIZE (mode);
5093 if (mode == BLKmode)
5095 byte_size = int_size_in_bytes (valtype);
5096 if (byte_size < 0)
5097 return true;
5100 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
5102 hfa_mode = hfa_element_mode (valtype, 0);
5103 if (hfa_mode != VOIDmode)
5105 int hfa_size = GET_MODE_SIZE (hfa_mode);
5107 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
5108 return true;
5109 else
5110 return false;
5112 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
5113 return true;
5114 else
5115 return false;
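/* Illustrative cases: an HFA of 8 doubles (64 bytes) fits the FP return
   registers and returns false, an HFA of 10 doubles exceeds
   MAX_ARGUMENT_SLOTS elements and returns true, and a non-HFA aggregate
   returns true once its size exceeds UNITS_PER_WORD * MAX_INT_RETURN_SLOTS
   bytes (32 bytes if MAX_INT_RETURN_SLOTS is 4).  */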
5118 /* Return rtx for register that holds the function return value. */
5120 static rtx
5121 ia64_function_value (const_tree valtype,
5122 const_tree fn_decl_or_type,
5123 bool outgoing ATTRIBUTE_UNUSED)
5125 machine_mode mode;
5126 machine_mode hfa_mode;
5127 int unsignedp;
5128 const_tree func = fn_decl_or_type;
5130 if (fn_decl_or_type
5131 && !DECL_P (fn_decl_or_type))
5132 func = NULL;
5134 mode = TYPE_MODE (valtype);
5135 hfa_mode = hfa_element_mode (valtype, 0);
5137 if (hfa_mode != VOIDmode)
5139 rtx loc[8];
5140 int i;
5141 int hfa_size;
5142 int byte_size;
5143 int offset;
5145 hfa_size = GET_MODE_SIZE (hfa_mode);
5146 byte_size = ((mode == BLKmode)
5147 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
5148 offset = 0;
5149 for (i = 0; offset < byte_size; i++)
5151 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5152 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
5153 GEN_INT (offset));
5154 offset += hfa_size;
5156 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5158 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
5159 return gen_rtx_REG (mode, FR_ARG_FIRST);
5160 else
5162 bool need_parallel = false;
5164 /* In big-endian mode, we need to manage the layout of aggregates
5165 in the registers so that we get the bits properly aligned in
5166 the highpart of the registers. */
5167 if (BYTES_BIG_ENDIAN
5168 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
5169 need_parallel = true;
5171 /* Something like struct S { long double x; char a[0] } is not an
5172 HFA structure, and therefore doesn't go in fp registers. But
5173 the middle-end will give it XFmode anyway, and XFmode values
5174 don't normally fit in integer registers. So we need to smuggle
5175 the value inside a parallel. */
5176 else if (mode == XFmode || mode == XCmode || mode == RFmode)
5177 need_parallel = true;
5179 if (need_parallel)
5181 rtx loc[8];
5182 int offset;
5183 int bytesize;
5184 int i;
5186 offset = 0;
5187 bytesize = int_size_in_bytes (valtype);
5188 /* An empty PARALLEL is invalid here, but the return value
5189 doesn't matter for empty structs. */
5190 if (bytesize == 0)
5191 return gen_rtx_REG (mode, GR_RET_FIRST);
5192 for (i = 0; offset < bytesize; i++)
5194 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5195 gen_rtx_REG (DImode,
5196 GR_RET_FIRST + i),
5197 GEN_INT (offset));
5198 offset += UNITS_PER_WORD;
5200 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5203 mode = promote_function_mode (valtype, mode, &unsignedp,
5204 func ? TREE_TYPE (func) : NULL_TREE,
5205 true);
5207 return gen_rtx_REG (mode, GR_RET_FIRST);
5211 /* Worker function for TARGET_LIBCALL_VALUE. */
5213 static rtx
5214 ia64_libcall_value (machine_mode mode,
5215 const_rtx fun ATTRIBUTE_UNUSED)
5217 return gen_rtx_REG (mode,
5218 (((GET_MODE_CLASS (mode) == MODE_FLOAT
5219 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5220 && (mode) != TFmode)
5221 ? FR_RET_FIRST : GR_RET_FIRST));
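/* So, for instance, a DFmode or SCmode libcall result is placed in the
   first FP return register (FR_RET_FIRST), whereas DImode, TImode and
   TFmode results use the first general return register (GR_RET_FIRST).  */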
5224 /* Worker function for FUNCTION_VALUE_REGNO_P. */
5226 static bool
5227 ia64_function_value_regno_p (const unsigned int regno)
5229 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
5230 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
5233 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5234 We need to emit DTP-relative relocations. */
5236 static void
5237 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
5239 gcc_assert (size == 4 || size == 8);
5240 if (size == 4)
5241 fputs ("\tdata4.ua\t@dtprel(", file);
5242 else
5243 fputs ("\tdata8.ua\t@dtprel(", file);
5244 output_addr_const (file, x);
5245 fputs (")", file);
5248 /* Print a memory address as an operand to reference that memory location. */
5250 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
5251 also call this from ia64_print_operand for memory addresses. */
5253 static void
5254 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
5255 machine_mode /*mode*/,
5256 rtx address ATTRIBUTE_UNUSED)
5260 /* Print an operand to an assembler instruction.
5261 C Swap and print a comparison operator.
5262 D Print an FP comparison operator.
5263 E Print 32 - constant, for SImode shifts as extract.
5264 e Print 64 - constant, for DImode rotates.
5265 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
5266 a floating point register emitted normally.
5267 G A floating point constant.
5268 I Invert a predicate register by adding 1.
5269 J Select the proper predicate register for a condition.
5270 j Select the inverse predicate register for a condition.
5271 O Append .acq for volatile load.
5272 P Postincrement of a MEM.
5273 Q Append .rel for volatile store.
5274 R Print .s .d or nothing for a single, double or no truncation.
5275 S Shift amount for shladd instruction.
5276 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5277 for Intel assembler.
5278 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5279 for Intel assembler.
5280 X A pair of floating point registers.
5281 r Print register name, or constant 0 as r0. HP compatibility for
5282 Linux kernel.
5283 v Print vector constant value as an 8-byte integer value. */
5285 static void
5286 ia64_print_operand (FILE * file, rtx x, int code)
5288 const char *str;
5290 switch (code)
5292 case 0:
5293 /* Handled below. */
5294 break;
5296 case 'C':
5298 enum rtx_code c = swap_condition (GET_CODE (x));
5299 fputs (GET_RTX_NAME (c), file);
5300 return;
5303 case 'D':
5304 switch (GET_CODE (x))
5306 case NE:
5307 str = "neq";
5308 break;
5309 case UNORDERED:
5310 str = "unord";
5311 break;
5312 case ORDERED:
5313 str = "ord";
5314 break;
5315 case UNLT:
5316 str = "nge";
5317 break;
5318 case UNLE:
5319 str = "ngt";
5320 break;
5321 case UNGT:
5322 str = "nle";
5323 break;
5324 case UNGE:
5325 str = "nlt";
5326 break;
5327 case UNEQ:
5328 case LTGT:
5329 gcc_unreachable ();
5330 default:
5331 str = GET_RTX_NAME (GET_CODE (x));
5332 break;
5334 fputs (str, file);
5335 return;
5337 case 'E':
5338 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5339 return;
5341 case 'e':
5342 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5343 return;
5345 case 'F':
5346 if (x == CONST0_RTX (GET_MODE (x)))
5347 str = reg_names [FR_REG (0)];
5348 else if (x == CONST1_RTX (GET_MODE (x)))
5349 str = reg_names [FR_REG (1)];
5350 else
5352 gcc_assert (GET_CODE (x) == REG);
5353 str = reg_names [REGNO (x)];
5355 fputs (str, file);
5356 return;
5358 case 'G':
5360 long val[4];
5361 real_to_target (val, CONST_DOUBLE_REAL_VALUE (x), GET_MODE (x));
5362 if (GET_MODE (x) == SFmode)
5363 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5364 else if (GET_MODE (x) == DFmode)
5365 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5366 & 0xffffffff,
5367 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5368 & 0xffffffff);
5369 else
5370 output_operand_lossage ("invalid %%G mode");
5372 return;
5374 case 'I':
5375 fputs (reg_names [REGNO (x) + 1], file);
5376 return;
5378 case 'J':
5379 case 'j':
5381 unsigned int regno = REGNO (XEXP (x, 0));
5382 if (GET_CODE (x) == EQ)
5383 regno += 1;
5384 if (code == 'j')
5385 regno ^= 1;
5386 fputs (reg_names [regno], file);
5388 return;
5390 case 'O':
5391 if (MEM_VOLATILE_P (x))
5392 fputs(".acq", file);
5393 return;
5395 case 'P':
5397 HOST_WIDE_INT value;
5399 switch (GET_CODE (XEXP (x, 0)))
5401 default:
5402 return;
5404 case POST_MODIFY:
5405 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5406 if (GET_CODE (x) == CONST_INT)
5407 value = INTVAL (x);
5408 else
5410 gcc_assert (GET_CODE (x) == REG);
5411 fprintf (file, ", %s", reg_names[REGNO (x)]);
5412 return;
5414 break;
5416 case POST_INC:
5417 value = GET_MODE_SIZE (GET_MODE (x));
5418 break;
5420 case POST_DEC:
5421 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5422 break;
5425 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5426 return;
5429 case 'Q':
5430 if (MEM_VOLATILE_P (x))
5431 fputs(".rel", file);
5432 return;
5434 case 'R':
5435 if (x == CONST0_RTX (GET_MODE (x)))
5436 fputs(".s", file);
5437 else if (x == CONST1_RTX (GET_MODE (x)))
5438 fputs(".d", file);
5439 else if (x == CONST2_RTX (GET_MODE (x)))
5441 else
5442 output_operand_lossage ("invalid %%R value");
5443 return;
5445 case 'S':
5446 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5447 return;
5449 case 'T':
5450 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5452 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5453 return;
5455 break;
5457 case 'U':
5458 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5460 const char *prefix = "0x";
5461 if (INTVAL (x) & 0x80000000)
5463 fprintf (file, "0xffffffff");
5464 prefix = "";
5466 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5467 return;
5469 break;
5471 case 'X':
5473 unsigned int regno = REGNO (x);
5474 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5476 return;
5478 case 'r':
5479 /* If this operand is the constant zero, write it as register zero.
5480 Any register, zero, or CONST_INT value is OK here. */
5481 if (GET_CODE (x) == REG)
5482 fputs (reg_names[REGNO (x)], file);
5483 else if (x == CONST0_RTX (GET_MODE (x)))
5484 fputs ("r0", file);
5485 else if (GET_CODE (x) == CONST_INT)
5486 output_addr_const (file, x);
5487 else
5488 output_operand_lossage ("invalid %%r value");
5489 return;
5491 case 'v':
5492 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5493 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5494 break;
5496 case '+':
5498 const char *which;
5500 /* For conditional branches, returns or calls, substitute
5501 sptk, dptk, dpnt, or spnt for %s. */
5502 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5503 if (x)
5505 int pred_val = profile_probability::from_reg_br_prob_note
5506 (XINT (x, 0)).to_reg_br_prob_base ();
5508 /* Guess top and bottom 10% statically predicted. */
5509 if (pred_val < REG_BR_PROB_BASE / 50
5510 && br_prob_note_reliable_p (x))
5511 which = ".spnt";
5512 else if (pred_val < REG_BR_PROB_BASE / 2)
5513 which = ".dpnt";
5514 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5515 || !br_prob_note_reliable_p (x))
5516 which = ".dptk";
5517 else
5518 which = ".sptk";
5520 else if (CALL_P (current_output_insn))
5521 which = ".sptk";
5522 else
5523 which = ".dptk";
5525 fputs (which, file);
5526 return;
5529 case ',':
5530 x = current_insn_predicate;
5531 if (x)
5533 unsigned int regno = REGNO (XEXP (x, 0));
5534 if (GET_CODE (x) == EQ)
5535 regno += 1;
5536 fprintf (file, "(%s) ", reg_names [regno]);
5538 return;
5540 default:
5541 output_operand_lossage ("ia64_print_operand: unknown code");
5542 return;
5545 switch (GET_CODE (x))
5547 /* This happens for the spill/restore instructions. */
5548 case POST_INC:
5549 case POST_DEC:
5550 case POST_MODIFY:
5551 x = XEXP (x, 0);
5552 /* fall through */
5554 case REG:
5555 fputs (reg_names [REGNO (x)], file);
5556 break;
5558 case MEM:
5560 rtx addr = XEXP (x, 0);
5561 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5562 addr = XEXP (addr, 0);
5563 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5564 break;
5567 default:
5568 output_addr_const (file, x);
5569 break;
5572 return;
5575 /* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5577 static bool
5578 ia64_print_operand_punct_valid_p (unsigned char code)
5580 return (code == '+' || code == ',');
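/* The two punctuation codes accepted here are the ones handled in
   ia64_print_operand above: '+' emits a branch prediction completer and
   ',' emits the "(pN) " qualifying predicate prefix.  */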
5583 /* Compute a (partial) cost for rtx X. Return true if the complete
5584 cost has been computed, and false if subexpressions should be
5585 scanned. In either case, *TOTAL contains the cost result. */
5586 /* ??? This is incomplete. */
5588 static bool
5589 ia64_rtx_costs (rtx x, machine_mode mode, int outer_code,
5590 int opno ATTRIBUTE_UNUSED,
5591 int *total, bool speed ATTRIBUTE_UNUSED)
5593 int code = GET_CODE (x);
5595 switch (code)
5597 case CONST_INT:
5598 switch (outer_code)
5600 case SET:
5601 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5602 return true;
5603 case PLUS:
5604 if (satisfies_constraint_I (x))
5605 *total = 0;
5606 else if (satisfies_constraint_J (x))
5607 *total = 1;
5608 else
5609 *total = COSTS_N_INSNS (1);
5610 return true;
5611 default:
5612 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5613 *total = 0;
5614 else
5615 *total = COSTS_N_INSNS (1);
5616 return true;
5619 case CONST_DOUBLE:
5620 *total = COSTS_N_INSNS (1);
5621 return true;
5623 case CONST:
5624 case SYMBOL_REF:
5625 case LABEL_REF:
5626 *total = COSTS_N_INSNS (3);
5627 return true;
5629 case FMA:
5630 *total = COSTS_N_INSNS (4);
5631 return true;
5633 case MULT:
5634 /* For multiplies wider than HImode, we have to go to the FPU,
5635 which normally involves copies. Plus there's the latency
5636 of the multiply itself, and the latency of the instructions to
5637 transfer integer regs to FP regs. */
5638 if (FLOAT_MODE_P (mode))
5639 *total = COSTS_N_INSNS (4);
5640 else if (GET_MODE_SIZE (mode) > 2)
5641 *total = COSTS_N_INSNS (10);
5642 else
5643 *total = COSTS_N_INSNS (2);
5644 return true;
5646 case PLUS:
5647 case MINUS:
5648 if (FLOAT_MODE_P (mode))
5650 *total = COSTS_N_INSNS (4);
5651 return true;
5653 /* FALLTHRU */
5655 case ASHIFT:
5656 case ASHIFTRT:
5657 case LSHIFTRT:
5658 *total = COSTS_N_INSNS (1);
5659 return true;
5661 case DIV:
5662 case UDIV:
5663 case MOD:
5664 case UMOD:
5665 /* We make divide expensive, so that divide-by-constant will be
5666 optimized to a multiply. */
5667 *total = COSTS_N_INSNS (60);
5668 return true;
5670 default:
5671 return false;
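/* A rough reading of the CONST_INT cases above (the authoritative ranges
   live in constraints.md): I and J are presumably the 14-bit "add" and
   22-bit "addl" immediate ranges, and K/L the 8-bit immediates of the
   logical and mov forms, so e.g. (plus (reg) (const_int 100000)) misses I,
   satisfies J, and is given the small nonzero cost of 1 rather than 0.  */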
5675 /* Calculate the cost of moving data from a register in class FROM to
5676 one in class TO, using MODE. */
5678 static int
5679 ia64_register_move_cost (machine_mode mode, reg_class_t from,
5680 reg_class_t to)
5682 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5683 if (to == ADDL_REGS)
5684 to = GR_REGS;
5685 if (from == ADDL_REGS)
5686 from = GR_REGS;
5688 /* All costs are symmetric, so reduce cases by putting the
5689 lower-numbered class as the destination. */
5690 if (from < to)
5692 reg_class_t tmp = to;
5693 to = from, from = tmp;
5696 /* Moving from FR<->GR in XFmode must be more expensive than 2,
5697 so that we get secondary memory reloads. Between FR_REGS,
5698 we have to make this at least as expensive as memory_move_cost
5699 to avoid spectacularly poor register class preferencing. */
5700 if (mode == XFmode || mode == RFmode)
5702 if (to != GR_REGS || from != GR_REGS)
5703 return memory_move_cost (mode, to, false);
5704 else
5705 return 3;
5708 switch (to)
5710 case PR_REGS:
5711 /* Moving between PR registers takes two insns. */
5712 if (from == PR_REGS)
5713 return 3;
5714 /* Moving between PR and anything but GR is impossible. */
5715 if (from != GR_REGS)
5716 return memory_move_cost (mode, to, false);
5717 break;
5719 case BR_REGS:
5720 /* Moving between BR and anything but GR is impossible. */
5721 if (from != GR_REGS && from != GR_AND_BR_REGS)
5722 return memory_move_cost (mode, to, false);
5723 break;
5725 case AR_I_REGS:
5726 case AR_M_REGS:
5727 /* Moving between AR and anything but GR is impossible. */
5728 if (from != GR_REGS)
5729 return memory_move_cost (mode, to, false);
5730 break;
5732 case GR_REGS:
5733 case FR_REGS:
5734 case FP_REGS:
5735 case GR_AND_FR_REGS:
5736 case GR_AND_BR_REGS:
5737 case ALL_REGS:
5738 break;
5740 default:
5741 gcc_unreachable ();
5744 return 2;
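/* Summing up the cases above: ordinary GR<->GR, GR<->FR and GR<->BR moves
   cost 2, a PR<->PR copy costs 3 (two insns), XFmode/RFmode moves that
   involve the floating-point registers are priced like memory so that a
   secondary memory reload is used, and class pairs with no direct copy
   path fall back to memory_move_cost.  */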
5747 /* Calculate the cost of moving data of MODE from a register to or from
5748 memory. */
5750 static int
5751 ia64_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
5752 reg_class_t rclass,
5753 bool in ATTRIBUTE_UNUSED)
5755 if (rclass == GENERAL_REGS
5756 || rclass == FR_REGS
5757 || rclass == FP_REGS
5758 || rclass == GR_AND_FR_REGS)
5759 return 4;
5760 else
5761 return 10;
5764 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5765 on RCLASS to use when copying X into that class. */
5767 static reg_class_t
5768 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5770 switch (rclass)
5772 case FR_REGS:
5773 case FP_REGS:
5774 /* Don't allow volatile mem reloads into floating point registers.
5775 This is defined to force reload to choose the r/m case instead
5776 of the f/f case when reloading (set (reg fX) (mem/v)). */
5777 if (MEM_P (x) && MEM_VOLATILE_P (x))
5778 return NO_REGS;
5780 /* Force all unrecognized constants into the constant pool. */
5781 if (CONSTANT_P (x))
5782 return NO_REGS;
5783 break;
5785 case AR_M_REGS:
5786 case AR_I_REGS:
5787 if (!OBJECT_P (x))
5788 return NO_REGS;
5789 break;
5791 default:
5792 break;
5795 return rclass;
5798 /* This function returns the register class required for a secondary
5799 register when copying between one of the registers in RCLASS, and X,
5800 using MODE. A return value of NO_REGS means that no secondary register
5801 is required. */
5803 enum reg_class
5804 ia64_secondary_reload_class (enum reg_class rclass,
5805 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5807 int regno = -1;
5809 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5810 regno = true_regnum (x);
5812 switch (rclass)
5814 case BR_REGS:
5815 case AR_M_REGS:
5816 case AR_I_REGS:
5817 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5818 interaction. We end up with two pseudos with overlapping lifetimes
5819 both of which are equiv to the same constant, and both of which need
5820 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5821 changes depending on the path length, which means the qty_first_reg
5822 check in make_regs_eqv can give different answers at different times.
5823 At some point I'll probably need a reload_indi pattern to handle
5824 this.
5826 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5827 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5828 non-general registers for good measure. */
5829 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5830 return GR_REGS;
5832 /* This is needed if a pseudo used as a call_operand gets spilled to a
5833 stack slot. */
5834 if (GET_CODE (x) == MEM)
5835 return GR_REGS;
5836 break;
5838 case FR_REGS:
5839 case FP_REGS:
5840 /* Need to go through general registers to get to other class regs. */
5841 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5842 return GR_REGS;
5844 /* This can happen when a paradoxical subreg is an operand to the
5845 muldi3 pattern. */
5846 /* ??? This shouldn't be necessary after instruction scheduling is
5847 enabled, because paradoxical subregs are not accepted by
5848 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5849 stop the paradoxical subreg stupidity in the *_operand functions
5850 in recog.c. */
5851 if (GET_CODE (x) == MEM
5852 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5853 || GET_MODE (x) == QImode))
5854 return GR_REGS;
5856 /* This can happen because of the ior/and/etc patterns that accept FP
5857 registers as operands. If the third operand is a constant, then it
5858 needs to be reloaded into a FP register. */
5859 if (GET_CODE (x) == CONST_INT)
5860 return GR_REGS;
5862 /* This can happen because of register elimination in a muldi3 insn.
5863 E.g. `26107 * (unsigned long)&u'. */
5864 if (GET_CODE (x) == PLUS)
5865 return GR_REGS;
5866 break;
5868 case PR_REGS:
5869 /* ??? This happens if we cse/gcse a BImode value across a call,
5870 and the function has a nonlocal goto. This is because global
5871 does not allocate call crossing pseudos to hard registers when
5872 crtl->has_nonlocal_goto is true. This is relatively
5873 common for C++ programs that use exceptions. To reproduce,
5874 return NO_REGS and compile libstdc++. */
5875 if (GET_CODE (x) == MEM)
5876 return GR_REGS;
5878 /* This can happen when we take a BImode subreg of a DImode value,
5879 and that DImode value winds up in some non-GR register. */
5880 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5881 return GR_REGS;
5882 break;
5884 default:
5885 break;
5888 return NO_REGS;
5892 /* Implement targetm.unspec_may_trap_p hook. */
5893 static int
5894 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5896 switch (XINT (x, 1))
5898 case UNSPEC_LDA:
5899 case UNSPEC_LDS:
5900 case UNSPEC_LDSA:
5901 case UNSPEC_LDCCLR:
5902 case UNSPEC_CHKACLR:
5903 case UNSPEC_CHKS:
5904 /* These unspecs are just wrappers. */
5905 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5908 return default_unspec_may_trap_p (x, flags);
5912 /* Parse the -mfixed-range= option string. */
5914 static void
5915 fix_range (const char *const_str)
5917 int i, first, last;
5918 char *str, *dash, *comma;
5920 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5921 REG2 are either register names or register numbers. The effect
5922 of this option is to mark the registers in the range from REG1 to
5923 REG2 as ``fixed'' so they won't be used by the compiler. This is
5924 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
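/* For example, "-mfixed-range=f32-f127" marks f32 through f127 as fixed,
   and several ranges can be given separated by commas, as in the
   (hypothetical) "-mfixed-range=f12-f15,f32-f127".  */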
5926 i = strlen (const_str);
5927 str = (char *) alloca (i + 1);
5928 memcpy (str, const_str, i + 1);
5930 while (1)
5932 dash = strchr (str, '-');
5933 if (!dash)
5935 warning (0, "value of -mfixed-range must have form REG1-REG2");
5936 return;
5938 *dash = '\0';
5940 comma = strchr (dash + 1, ',');
5941 if (comma)
5942 *comma = '\0';
5944 first = decode_reg_name (str);
5945 if (first < 0)
5947 warning (0, "unknown register name: %s", str);
5948 return;
5951 last = decode_reg_name (dash + 1);
5952 if (last < 0)
5954 warning (0, "unknown register name: %s", dash + 1);
5955 return;
5958 *dash = '-';
5960 if (first > last)
5962 warning (0, "%s-%s is an empty range", str, dash + 1);
5963 return;
5966 for (i = first; i <= last; ++i)
5967 fixed_regs[i] = call_used_regs[i] = 1;
5969 if (!comma)
5970 break;
5972 *comma = ',';
5973 str = comma + 1;
5977 /* Implement TARGET_OPTION_OVERRIDE. */
5979 static void
5980 ia64_option_override (void)
5982 unsigned int i;
5983 cl_deferred_option *opt;
5984 vec<cl_deferred_option> *v
5985 = (vec<cl_deferred_option> *) ia64_deferred_options;
5987 if (v)
5988 FOR_EACH_VEC_ELT (*v, i, opt)
5990 switch (opt->opt_index)
5992 case OPT_mfixed_range_:
5993 fix_range (opt->arg);
5994 break;
5996 default:
5997 gcc_unreachable ();
6001 if (TARGET_AUTO_PIC)
6002 target_flags |= MASK_CONST_GP;
6004 /* Numerous experiments show that IRA-based loop pressure
6005 calculation works better for RTL loop invariant motion on targets
6006 with enough (>= 32) registers.  It is an expensive optimization,
6007 so it is enabled only when optimizing for peak performance. */
6008 if (optimize >= 3)
6009 flag_ira_loop_pressure = 1;
6012 ia64_section_threshold = (global_options_set.x_g_switch_value
6013 ? g_switch_value
6014 : IA64_DEFAULT_GVALUE);
6016 init_machine_status = ia64_init_machine_status;
6018 if (align_functions <= 0)
6019 align_functions = 64;
6020 if (align_loops <= 0)
6021 align_loops = 32;
6022 if (TARGET_ABI_OPEN_VMS)
6023 flag_no_common = 1;
6025 ia64_override_options_after_change();
6028 /* Implement targetm.override_options_after_change. */
6030 static void
6031 ia64_override_options_after_change (void)
6033 if (optimize >= 3
6034 && !global_options_set.x_flag_selective_scheduling
6035 && !global_options_set.x_flag_selective_scheduling2)
6037 flag_selective_scheduling2 = 1;
6038 flag_sel_sched_pipelining = 1;
6040 if (mflag_sched_control_spec == 2)
6042 /* Control speculation is on by default for the selective scheduler,
6043 but not for the Haifa scheduler. */
6044 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
6046 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
6048 /* FIXME: remove this once breaking auto-inc insns apart is
6049 implemented as a transformation. */
6050 flag_auto_inc_dec = 0;
6054 /* Initialize the record of emitted frame related registers. */
6056 void ia64_init_expanders (void)
6058 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
6061 static struct machine_function *
6062 ia64_init_machine_status (void)
6064 return ggc_cleared_alloc<machine_function> ();
6067 static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
6068 static enum attr_type ia64_safe_type (rtx_insn *);
6070 static enum attr_itanium_class
6071 ia64_safe_itanium_class (rtx_insn *insn)
6073 if (recog_memoized (insn) >= 0)
6074 return get_attr_itanium_class (insn);
6075 else if (DEBUG_INSN_P (insn))
6076 return ITANIUM_CLASS_IGNORE;
6077 else
6078 return ITANIUM_CLASS_UNKNOWN;
6081 static enum attr_type
6082 ia64_safe_type (rtx_insn *insn)
6084 if (recog_memoized (insn) >= 0)
6085 return get_attr_type (insn);
6086 else
6087 return TYPE_UNKNOWN;
6090 /* The following collection of routines emit instruction group stop bits as
6091 necessary to avoid dependencies. */
6093 /* Need to track some additional registers as far as serialization is
6094 concerned so we can properly handle br.call and br.ret. We could
6095 make these registers visible to gcc, but since these registers are
6096 never explicitly used in gcc generated code, it seems wasteful to
6097 do so (plus it would make the call and return patterns needlessly
6098 complex). */
6099 #define REG_RP (BR_REG (0))
6100 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
6101 /* This is used for volatile asms which may require a stop bit immediately
6102 before and after them. */
6103 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
6104 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
6105 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
6107 /* For each register, we keep track of how it has been written in the
6108 current instruction group.
6110 If a register is written unconditionally (no qualifying predicate),
6111 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
6113 If a register is written if its qualifying predicate P is true, we
6114 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
6115 may be written again by the complement of P (P^1) and when this happens,
6116 WRITE_COUNT gets set to 2.
6118 The result of this is that whenever an insn attempts to write a register
6119 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
6121 If a predicate register is written by a floating-point insn, we set
6122 WRITTEN_BY_FP to true.
6124 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
6125 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
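/* A small worked illustration of the scheme above: an unpredicated
   "mov r14 = r15" sets the WRITE_COUNT for r14 straight to 2, while
   "(p6) mov r14 = r15" sets it to 1 and records p6 in FIRST_PRED.  Once
   the count has reached 2, any later insn in the same group that writes
   r14 -- or, aside from the branch and predicate exceptions handled in
   rws_access_regno, reads it -- causes a stop bit to be requested.  */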
6127 #if GCC_VERSION >= 4000
6128 #define RWS_FIELD_TYPE __extension__ unsigned short
6129 #else
6130 #define RWS_FIELD_TYPE unsigned int
6131 #endif
6132 struct reg_write_state
6134 RWS_FIELD_TYPE write_count : 2;
6135 RWS_FIELD_TYPE first_pred : 10;
6136 RWS_FIELD_TYPE written_by_fp : 1;
6137 RWS_FIELD_TYPE written_by_and : 1;
6138 RWS_FIELD_TYPE written_by_or : 1;
6141 /* Cumulative info for the current instruction group. */
6142 struct reg_write_state rws_sum[NUM_REGS];
6143 #if CHECKING_P
6144 /* Bitmap whether a register has been written in the current insn. */
6145 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
6146 / HOST_BITS_PER_WIDEST_FAST_INT];
6148 static inline void
6149 rws_insn_set (int regno)
6151 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
6152 SET_HARD_REG_BIT (rws_insn, regno);
6155 static inline int
6156 rws_insn_test (int regno)
6158 return TEST_HARD_REG_BIT (rws_insn, regno);
6160 #else
6161 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
6162 unsigned char rws_insn[2];
6164 static inline void
6165 rws_insn_set (int regno)
6167 if (regno == REG_AR_CFM)
6168 rws_insn[0] = 1;
6169 else if (regno == REG_VOLATILE)
6170 rws_insn[1] = 1;
6173 static inline int
6174 rws_insn_test (int regno)
6176 if (regno == REG_AR_CFM)
6177 return rws_insn[0];
6178 if (regno == REG_VOLATILE)
6179 return rws_insn[1];
6180 return 0;
6182 #endif
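/* Only REG_AR_CFM and REG_VOLATILE need to be remembered in the
   non-checking variant because those are the only registers
   rws_insn_test is queried about: rtx_needs_barrier uses it to avoid
   recording AR_CFM twice for patterns containing several CALLs and to
   record at most one REG_VOLATILE access per asm.  */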
6184 /* Indicates whether this is the first instruction after a stop bit,
6185 in which case we don't need another stop bit. Without this,
6186 ia64_variable_issue will die when scheduling an alloc. */
6187 static int first_instruction;
6189 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
6190 RTL for one instruction. */
6191 struct reg_flags
6193 unsigned int is_write : 1; /* Is register being written? */
6194 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
6195 unsigned int is_branch : 1; /* Is register used as part of a branch? */
6196 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
6197 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
6198 unsigned int is_sibcall : 1; /* Is this a sibling call rather than a normal call? */
6201 static void rws_update (int, struct reg_flags, int);
6202 static int rws_access_regno (int, struct reg_flags, int);
6203 static int rws_access_reg (rtx, struct reg_flags, int);
6204 static void update_set_flags (rtx, struct reg_flags *);
6205 static int set_src_needs_barrier (rtx, struct reg_flags, int);
6206 static int rtx_needs_barrier (rtx, struct reg_flags, int);
6207 static void init_insn_group_barriers (void);
6208 static int group_barrier_needed (rtx_insn *);
6209 static int safe_group_barrier_needed (rtx_insn *);
6210 static int in_safe_group_barrier;
6212 /* Update *RWS for REGNO, which is being written by the current instruction,
6213 with predicate PRED, and associated register flags in FLAGS. */
6215 static void
6216 rws_update (int regno, struct reg_flags flags, int pred)
6218 if (pred)
6219 rws_sum[regno].write_count++;
6220 else
6221 rws_sum[regno].write_count = 2;
6222 rws_sum[regno].written_by_fp |= flags.is_fp;
6223 /* ??? Not tracking and/or across differing predicates. */
6224 rws_sum[regno].written_by_and = flags.is_and;
6225 rws_sum[regno].written_by_or = flags.is_or;
6226 rws_sum[regno].first_pred = pred;
6229 /* Handle an access to register REGNO of type FLAGS using predicate register
6230 PRED. Update rws_sum array. Return 1 if this access creates
6231 a dependency with an earlier instruction in the same group. */
6233 static int
6234 rws_access_regno (int regno, struct reg_flags flags, int pred)
6236 int need_barrier = 0;
6238 gcc_assert (regno < NUM_REGS);
6240 if (! PR_REGNO_P (regno))
6241 flags.is_and = flags.is_or = 0;
6243 if (flags.is_write)
6245 int write_count;
6247 rws_insn_set (regno);
6248 write_count = rws_sum[regno].write_count;
6250 switch (write_count)
6252 case 0:
6253 /* The register has not been written yet. */
6254 if (!in_safe_group_barrier)
6255 rws_update (regno, flags, pred);
6256 break;
6258 case 1:
6259 /* The register has been written via a predicate.  Treat
6260 it like an unconditional write and do not try to check
6261 for a complementary predicate reg in the earlier write. */
6262 if (flags.is_and && rws_sum[regno].written_by_and)
6263 ;
6264 else if (flags.is_or && rws_sum[regno].written_by_or)
6265 ;
6266 else
6267 need_barrier = 1;
6268 if (!in_safe_group_barrier)
6269 rws_update (regno, flags, pred);
6270 break;
6272 case 2:
6273 /* The register has been unconditionally written already. We
6274 need a barrier. */
6275 if (flags.is_and && rws_sum[regno].written_by_and)
6276 ;
6277 else if (flags.is_or && rws_sum[regno].written_by_or)
6278 ;
6279 else
6280 need_barrier = 1;
6281 if (!in_safe_group_barrier)
6283 rws_sum[regno].written_by_and = flags.is_and;
6284 rws_sum[regno].written_by_or = flags.is_or;
6286 break;
6288 default:
6289 gcc_unreachable ();
6292 else
6294 if (flags.is_branch)
6296 /* Branches have several RAW exceptions that allow us to avoid
6297 barriers. */
6299 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6300 /* RAW dependencies on branch regs are permissible as long
6301 as the writer is a non-branch instruction. Since we
6302 never generate code that uses a branch register written
6303 by a branch instruction, handling this case is
6304 easy. */
6305 return 0;
6307 if (REGNO_REG_CLASS (regno) == PR_REGS
6308 && ! rws_sum[regno].written_by_fp)
6309 /* The predicates of a branch are available within the
6310 same insn group as long as the predicate was written by
6311 something other than a floating-point instruction. */
6312 return 0;
6315 if (flags.is_and && rws_sum[regno].written_by_and)
6316 return 0;
6317 if (flags.is_or && rws_sum[regno].written_by_or)
6318 return 0;
6320 switch (rws_sum[regno].write_count)
6322 case 0:
6323 /* The register has not been written yet. */
6324 break;
6326 case 1:
6327 /* The register has been written via a predicate, assume we
6328 need a barrier (don't check for complementary regs). */
6329 need_barrier = 1;
6330 break;
6332 case 2:
6333 /* The register has been unconditionally written already. We
6334 need a barrier. */
6335 need_barrier = 1;
6336 break;
6338 default:
6339 gcc_unreachable ();
6343 return need_barrier;
6346 static int
6347 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6349 int regno = REGNO (reg);
6350 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6352 if (n == 1)
6353 return rws_access_regno (regno, flags, pred);
6354 else
6356 int need_barrier = 0;
6357 while (--n >= 0)
6358 need_barrier |= rws_access_regno (regno + n, flags, pred);
6359 return need_barrier;
6363 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
6364 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
6366 static void
6367 update_set_flags (rtx x, struct reg_flags *pflags)
6369 rtx src = SET_SRC (x);
6371 switch (GET_CODE (src))
6373 case CALL:
6374 return;
6376 case IF_THEN_ELSE:
6377 /* There are four cases here:
6378 (1) The destination is (pc), in which case this is a branch,
6379 nothing here applies.
6380 (2) The destination is ar.lc, in which case this is a
6381 doloop_end_internal,
6382 (3) The destination is an fp register, in which case this is
6383 an fselect instruction.
6384 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6385 this is a check load.
6386 In all cases, nothing we do in this function applies. */
6387 return;
6389 default:
6390 if (COMPARISON_P (src)
6391 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6392 /* Set pflags->is_fp to 1 so that we know we're dealing
6393 with a floating point comparison when processing the
6394 destination of the SET. */
6395 pflags->is_fp = 1;
6397 /* Discover if this is a parallel comparison. We only handle
6398 and.orcm and or.andcm at present, since we must retain a
6399 strict inverse on the predicate pair. */
6400 else if (GET_CODE (src) == AND)
6401 pflags->is_and = 1;
6402 else if (GET_CODE (src) == IOR)
6403 pflags->is_or = 1;
6405 break;
6409 /* Subroutine of rtx_needs_barrier; this function determines whether the
6410 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6411 are as in rtx_needs_barrier. COND is an rtx that holds the condition
6412 for this insn. */
6414 static int
6415 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6417 int need_barrier = 0;
6418 rtx dst;
6419 rtx src = SET_SRC (x);
6421 if (GET_CODE (src) == CALL)
6422 /* We don't need to worry about the result registers that
6423 get written by a subroutine call. */
6424 return rtx_needs_barrier (src, flags, pred);
6425 else if (SET_DEST (x) == pc_rtx)
6427 /* X is a conditional branch. */
6428 /* ??? This seems redundant, as the caller sets this bit for
6429 all JUMP_INSNs. */
6430 if (!ia64_spec_check_src_p (src))
6431 flags.is_branch = 1;
6432 return rtx_needs_barrier (src, flags, pred);
6435 if (ia64_spec_check_src_p (src))
6436 /* Avoid checking one register twice (in condition
6437 and in the 'then' section) for the ldc pattern. */
6439 gcc_assert (REG_P (XEXP (src, 2)));
6440 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6442 /* We process MEM below. */
6443 src = XEXP (src, 1);
6446 need_barrier |= rtx_needs_barrier (src, flags, pred);
6448 dst = SET_DEST (x);
6449 if (GET_CODE (dst) == ZERO_EXTRACT)
6451 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6452 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6454 return need_barrier;
6457 /* Handle an access to rtx X of type FLAGS using predicate register
6458 PRED. Return 1 if this access creates a dependency with an earlier
6459 instruction in the same group. */
6461 static int
6462 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6464 int i, j;
6465 int is_complemented = 0;
6466 int need_barrier = 0;
6467 const char *format_ptr;
6468 struct reg_flags new_flags;
6469 rtx cond;
6471 if (! x)
6472 return 0;
6474 new_flags = flags;
6476 switch (GET_CODE (x))
6478 case SET:
6479 update_set_flags (x, &new_flags);
6480 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6481 if (GET_CODE (SET_SRC (x)) != CALL)
6483 new_flags.is_write = 1;
6484 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6486 break;
6488 case CALL:
6489 new_flags.is_write = 0;
6490 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6492 /* Avoid multiple register writes, in case this is a pattern with
6493 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6494 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6496 new_flags.is_write = 1;
6497 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6498 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6499 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6501 break;
6503 case COND_EXEC:
6504 /* X is a predicated instruction. */
6506 cond = COND_EXEC_TEST (x);
6507 gcc_assert (!pred);
6508 need_barrier = rtx_needs_barrier (cond, flags, 0);
6510 if (GET_CODE (cond) == EQ)
6511 is_complemented = 1;
6512 cond = XEXP (cond, 0);
6513 gcc_assert (GET_CODE (cond) == REG
6514 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6515 pred = REGNO (cond);
6516 if (is_complemented)
6517 ++pred;
6519 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6520 return need_barrier;
6522 case CLOBBER:
6523 case USE:
6524 /* Clobber & use are for earlier compiler phases only. */
6525 break;
6527 case ASM_OPERANDS:
6528 case ASM_INPUT:
6529 /* We always emit stop bits for traditional asms. We emit stop bits
6530 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6531 if (GET_CODE (x) != ASM_OPERANDS
6532 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6534 /* Avoid writing the register multiple times if we have multiple
6535 asm outputs. This avoids a failure in rws_access_reg. */
6536 if (! rws_insn_test (REG_VOLATILE))
6538 new_flags.is_write = 1;
6539 rws_access_regno (REG_VOLATILE, new_flags, pred);
6541 return 1;
6544 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6545 We cannot just fall through here since then we would be confused
6546 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
6547 traditional asms, unlike their normal usage. */
6549 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6550 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6551 need_barrier = 1;
6552 break;
6554 case PARALLEL:
6555 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6557 rtx pat = XVECEXP (x, 0, i);
6558 switch (GET_CODE (pat))
6560 case SET:
6561 update_set_flags (pat, &new_flags);
6562 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6563 break;
6565 case USE:
6566 case CALL:
6567 case ASM_OPERANDS:
6568 case ASM_INPUT:
6569 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6570 break;
6572 case CLOBBER:
6573 if (REG_P (XEXP (pat, 0))
6574 && extract_asm_operands (x) != NULL_RTX
6575 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6577 new_flags.is_write = 1;
6578 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6579 new_flags, pred);
6580 new_flags = flags;
6582 break;
6584 case RETURN:
6585 break;
6587 default:
6588 gcc_unreachable ();
6591 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6593 rtx pat = XVECEXP (x, 0, i);
6594 if (GET_CODE (pat) == SET)
6596 if (GET_CODE (SET_SRC (pat)) != CALL)
6598 new_flags.is_write = 1;
6599 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6600 pred);
6603 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6604 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6606 break;
6608 case SUBREG:
6609 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6610 break;
6611 case REG:
6612 if (REGNO (x) == AR_UNAT_REGNUM)
6614 for (i = 0; i < 64; ++i)
6615 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6617 else
6618 need_barrier = rws_access_reg (x, flags, pred);
6619 break;
6621 case MEM:
6622 /* Find the regs used in memory address computation. */
6623 new_flags.is_write = 0;
6624 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6625 break;
6627 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6628 case SYMBOL_REF: case LABEL_REF: case CONST:
6629 break;
6631 /* Operators with side-effects. */
6632 case POST_INC: case POST_DEC:
6633 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6635 new_flags.is_write = 0;
6636 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6637 new_flags.is_write = 1;
6638 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6639 break;
6641 case POST_MODIFY:
6642 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6644 new_flags.is_write = 0;
6645 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6646 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6647 new_flags.is_write = 1;
6648 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6649 break;
6651 /* Handle common unary and binary ops for efficiency. */
6652 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6653 case MOD: case UDIV: case UMOD: case AND: case IOR:
6654 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6655 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6656 case NE: case EQ: case GE: case GT: case LE:
6657 case LT: case GEU: case GTU: case LEU: case LTU:
6658 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6659 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6660 break;
6662 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6663 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6664 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6665 case SQRT: case FFS: case POPCOUNT:
6666 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6667 break;
6669 case VEC_SELECT:
6670 /* VEC_SELECT's second argument is a PARALLEL with integers that
6671 describe the elements selected. On ia64, those integers are
6672 always constants. Avoid walking the PARALLEL so that we don't
6673 get confused with "normal" parallels and then die. */
6674 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6675 break;
6677 case UNSPEC:
6678 switch (XINT (x, 1))
6680 case UNSPEC_LTOFF_DTPMOD:
6681 case UNSPEC_LTOFF_DTPREL:
6682 case UNSPEC_DTPREL:
6683 case UNSPEC_LTOFF_TPREL:
6684 case UNSPEC_TPREL:
6685 case UNSPEC_PRED_REL_MUTEX:
6686 case UNSPEC_PIC_CALL:
6687 case UNSPEC_MF:
6688 case UNSPEC_FETCHADD_ACQ:
6689 case UNSPEC_FETCHADD_REL:
6690 case UNSPEC_BSP_VALUE:
6691 case UNSPEC_FLUSHRS:
6692 case UNSPEC_BUNDLE_SELECTOR:
6693 break;
6695 case UNSPEC_GR_SPILL:
6696 case UNSPEC_GR_RESTORE:
6698 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6699 HOST_WIDE_INT bit = (offset >> 3) & 63;
6701 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6702 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6703 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6704 new_flags, pred);
6705 break;
6708 case UNSPEC_FR_SPILL:
6709 case UNSPEC_FR_RESTORE:
6710 case UNSPEC_GETF_EXP:
6711 case UNSPEC_SETF_EXP:
6712 case UNSPEC_ADDP4:
6713 case UNSPEC_FR_SQRT_RECIP_APPROX:
6714 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6715 case UNSPEC_LDA:
6716 case UNSPEC_LDS:
6717 case UNSPEC_LDS_A:
6718 case UNSPEC_LDSA:
6719 case UNSPEC_CHKACLR:
6720 case UNSPEC_CHKS:
6721 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6722 break;
6724 case UNSPEC_FR_RECIP_APPROX:
6725 case UNSPEC_SHRP:
6726 case UNSPEC_COPYSIGN:
6727 case UNSPEC_FR_RECIP_APPROX_RES:
6728 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6729 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6730 break;
6732 case UNSPEC_CMPXCHG_ACQ:
6733 case UNSPEC_CMPXCHG_REL:
6734 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6735 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6736 break;
6738 default:
6739 gcc_unreachable ();
6741 break;
6743 case UNSPEC_VOLATILE:
6744 switch (XINT (x, 1))
6746 case UNSPECV_ALLOC:
6747 /* Alloc must always be the first instruction of a group.
6748 We force this by always returning true. */
6749 /* ??? We might get better scheduling if we explicitly check for
6750 input/local/output register dependencies, and modify the
6751 scheduler so that alloc is always reordered to the start of
6752 the current group. We could then eliminate all of the
6753 first_instruction code. */
6754 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6756 new_flags.is_write = 1;
6757 rws_access_regno (REG_AR_CFM, new_flags, pred);
6758 return 1;
6760 case UNSPECV_SET_BSP:
6761 case UNSPECV_PROBE_STACK_RANGE:
6762 need_barrier = 1;
6763 break;
6765 case UNSPECV_BLOCKAGE:
6766 case UNSPECV_INSN_GROUP_BARRIER:
6767 case UNSPECV_BREAK:
6768 case UNSPECV_PSAC_ALL:
6769 case UNSPECV_PSAC_NORMAL:
6770 return 0;
6772 case UNSPECV_PROBE_STACK_ADDRESS:
6773 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6774 break;
6776 default:
6777 gcc_unreachable ();
6779 break;
6781 case RETURN:
6782 new_flags.is_write = 0;
6783 need_barrier = rws_access_regno (REG_RP, flags, pred);
6784 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6786 new_flags.is_write = 1;
6787 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6788 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6789 break;
6791 default:
6792 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6793 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6794 switch (format_ptr[i])
6796 case '0': /* unused field */
6797 case 'i': /* integer */
6798 case 'n': /* note */
6799 case 'w': /* wide integer */
6800 case 's': /* pointer to string */
6801 case 'S': /* optional pointer to string */
6802 break;
6804 case 'e':
6805 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6806 need_barrier = 1;
6807 break;
6809 case 'E':
6810 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6811 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6812 need_barrier = 1;
6813 break;
6815 default:
6816 gcc_unreachable ();
6818 break;
6820 return need_barrier;
6823 /* Clear out the state for group_barrier_needed at the start of a
6824 sequence of insns. */
6826 static void
6827 init_insn_group_barriers (void)
6829 memset (rws_sum, 0, sizeof (rws_sum));
6830 first_instruction = 1;
6833 /* Given the current state, determine whether a group barrier (a stop bit) is
6834 necessary before INSN. Return nonzero if so. This modifies the state to
6835 include the effects of INSN as a side-effect. */
6837 static int
6838 group_barrier_needed (rtx_insn *insn)
6840 rtx pat;
6841 int need_barrier = 0;
6842 struct reg_flags flags;
6844 memset (&flags, 0, sizeof (flags));
6845 switch (GET_CODE (insn))
6847 case NOTE:
6848 case DEBUG_INSN:
6849 break;
6851 case BARRIER:
6852 /* A barrier doesn't imply an instruction group boundary. */
6853 break;
6855 case CODE_LABEL:
6856 memset (rws_insn, 0, sizeof (rws_insn));
6857 return 1;
6859 case CALL_INSN:
6860 flags.is_branch = 1;
6861 flags.is_sibcall = SIBLING_CALL_P (insn);
6862 memset (rws_insn, 0, sizeof (rws_insn));
6864 /* Don't bundle a call following another call. */
6865 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6867 need_barrier = 1;
6868 break;
6871 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6872 break;
6874 case JUMP_INSN:
6875 if (!ia64_spec_check_p (insn))
6876 flags.is_branch = 1;
6878 /* Don't bundle a jump following a call. */
6879 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6881 need_barrier = 1;
6882 break;
6884 /* FALLTHRU */
6886 case INSN:
6887 if (GET_CODE (PATTERN (insn)) == USE
6888 || GET_CODE (PATTERN (insn)) == CLOBBER)
6889 /* Don't care about USE and CLOBBER "insns"---those are used to
6890 indicate to the optimizer that it shouldn't get rid of
6891 certain operations. */
6892 break;
6894 pat = PATTERN (insn);
6896 /* Ug. Hack hacks hacked elsewhere. */
6897 switch (recog_memoized (insn))
6899 /* We play dependency tricks with the epilogue in order
6900 to get proper schedules. Undo this for dv analysis. */
6901 case CODE_FOR_epilogue_deallocate_stack:
6902 case CODE_FOR_prologue_allocate_stack:
6903 pat = XVECEXP (pat, 0, 0);
6904 break;
6906 /* The pattern we use for br.cloop confuses the code above.
6907 The second element of the vector is representative. */
6908 case CODE_FOR_doloop_end_internal:
6909 pat = XVECEXP (pat, 0, 1);
6910 break;
6912 /* Doesn't generate code. */
6913 case CODE_FOR_pred_rel_mutex:
6914 case CODE_FOR_prologue_use:
6915 return 0;
6917 default:
6918 break;
6921 memset (rws_insn, 0, sizeof (rws_insn));
6922 need_barrier = rtx_needs_barrier (pat, flags, 0);
6924 /* Check to see if the previous instruction was a volatile
6925 asm. */
6926 if (! need_barrier)
6927 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6929 break;
6931 default:
6932 gcc_unreachable ();
6935 if (first_instruction && important_for_bundling_p (insn))
6937 need_barrier = 0;
6938 first_instruction = 0;
6941 return need_barrier;
6944 /* Like group_barrier_needed, but do not clobber the current state. */
6946 static int
6947 safe_group_barrier_needed (rtx_insn *insn)
6949 int saved_first_instruction;
6950 int t;
6952 saved_first_instruction = first_instruction;
6953 in_safe_group_barrier = 1;
6955 t = group_barrier_needed (insn);
6957 first_instruction = saved_first_instruction;
6958 in_safe_group_barrier = 0;
6960 return t;
6963 /* Scan the current function and insert stop bits as necessary to
6964 eliminate dependencies. This function assumes that a final
6965 instruction scheduling pass has been run which has already
6966 inserted most of the necessary stop bits. This function only
6967 inserts new ones at basic block boundaries, since these are
6968 invisible to the scheduler. */
6970 static void
6971 emit_insn_group_barriers (FILE *dump)
6973 rtx_insn *insn;
6974 rtx_insn *last_label = 0;
6975 int insns_since_last_label = 0;
6977 init_insn_group_barriers ();
6979 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6981 if (LABEL_P (insn))
6983 if (insns_since_last_label)
6984 last_label = insn;
6985 insns_since_last_label = 0;
6987 else if (NOTE_P (insn)
6988 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6990 if (insns_since_last_label)
6991 last_label = insn;
6992 insns_since_last_label = 0;
6994 else if (NONJUMP_INSN_P (insn)
6995 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6996 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6998 init_insn_group_barriers ();
6999 last_label = 0;
7001 else if (NONDEBUG_INSN_P (insn))
7003 insns_since_last_label = 1;
7005 if (group_barrier_needed (insn))
7007 if (last_label)
7009 if (dump)
7010 fprintf (dump, "Emitting stop before label %d\n",
7011 INSN_UID (last_label));
7012 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
7013 insn = last_label;
7015 init_insn_group_barriers ();
7016 last_label = 0;
7023 /* Like emit_insn_group_barriers, but used when no final scheduling pass
7024 has been run.  This function has to emit all necessary group barriers. */
7026 static void
7027 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7029 rtx_insn *insn;
7031 init_insn_group_barriers ();
7033 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7035 if (BARRIER_P (insn))
7037 rtx_insn *last = prev_active_insn (insn);
7039 if (! last)
7040 continue;
7041 if (JUMP_TABLE_DATA_P (last))
7042 last = prev_active_insn (last);
7043 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7044 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7046 init_insn_group_barriers ();
7048 else if (NONDEBUG_INSN_P (insn))
7050 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7051 init_insn_group_barriers ();
7052 else if (group_barrier_needed (insn))
7054 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
7055 init_insn_group_barriers ();
7056 group_barrier_needed (insn);
7064 /* Instruction scheduling support. */
7066 #define NR_BUNDLES 10
7068 /* A list of names of all available bundles. */
7070 static const char *bundle_name [NR_BUNDLES] =
7072 ".mii",
7073 ".mmi",
7074 ".mfi",
7075 ".mmf",
7076 #if NR_BUNDLES == 10
7077 ".bbb",
7078 ".mbb",
7079 #endif
7080 ".mib",
7081 ".mmb",
7082 ".mfb",
7083 ".mlx"
7086 /* Nonzero if we should insert stop bits into the schedule. */
7088 int ia64_final_schedule = 0;
7090 /* Codes of the corresponding queried units: */
7092 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
7093 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
7095 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
7096 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
7098 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
7100 /* The following variable value is an insn group barrier. */
7102 static rtx_insn *dfa_stop_insn;
7104 /* The following variable value is the last issued insn. */
7106 static rtx_insn *last_scheduled_insn;
7108 /* The following variable value is a pointer to a DFA state used as
7109 a temporary variable. */
7111 static state_t temp_dfa_state = NULL;
7113 /* The following variable value is DFA state after issuing the last
7114 insn. */
7116 static state_t prev_cycle_state = NULL;
7118 /* The following array element values are TRUE if the corresponding
7119 insn requires stop bits to be added before it. */
7121 static char *stops_p = NULL;
7123 /* The following variable is used to set up the array mentioned above. */
7125 static int stop_before_p = 0;
7127 /* The following variable value is the length of the arrays `clocks' and
7128 `add_cycles'. */
7130 static int clocks_length;
7132 /* The following variable value is number of data speculations in progress. */
7133 static int pending_data_specs = 0;
7135 /* Number of memory references on current and three future processor cycles. */
7136 static char mem_ops_in_group[4];
7138 /* Number of current processor cycle (from scheduler's point of view). */
7139 static int current_cycle;
7141 static rtx ia64_single_set (rtx_insn *);
7142 static void ia64_emit_insn_before (rtx, rtx_insn *);
7144 /* Map a bundle number to its pseudo-op. */
7146 const char *
7147 get_bundle_name (int b)
7149 return bundle_name[b];
7153 /* Return the maximum number of instructions a cpu can issue. */
7155 static int
7156 ia64_issue_rate (void)
7158 return 6;
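/* An issue rate of 6 corresponds to the two three-instruction bundles an
   Itanium core can dispatch per clock (see the bundle templates in
   bundle_name above).  */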
7161 /* Helper function - like single_set, but look inside COND_EXEC. */
7163 static rtx
7164 ia64_single_set (rtx_insn *insn)
7166 rtx x = PATTERN (insn), ret;
7167 if (GET_CODE (x) == COND_EXEC)
7168 x = COND_EXEC_CODE (x);
7169 if (GET_CODE (x) == SET)
7170 return x;
7172 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
7173 Although they are not classical single set, the second set is there just
7174 to protect it from moving past FP-relative stack accesses. */
7175 switch (recog_memoized (insn))
7177 case CODE_FOR_prologue_allocate_stack:
7178 case CODE_FOR_prologue_allocate_stack_pr:
7179 case CODE_FOR_epilogue_deallocate_stack:
7180 case CODE_FOR_epilogue_deallocate_stack_pr:
7181 ret = XVECEXP (x, 0, 0);
7182 break;
7184 default:
7185 ret = single_set_2 (insn, x);
7186 break;
7189 return ret;
7192 /* Adjust the cost of a scheduling dependency.
7193 Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
7194 COST is the current cost, DW is dependency weakness. */
7195 static int
7196 ia64_adjust_cost (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
7197 int cost, dw_t dw)
7199 enum reg_note dep_type = (enum reg_note) dep_type1;
7200 enum attr_itanium_class dep_class;
7201 enum attr_itanium_class insn_class;
7203 insn_class = ia64_safe_itanium_class (insn);
7204 dep_class = ia64_safe_itanium_class (dep_insn);
7206 /* Treat true memory dependencies separately. Ignore apparent true
7207 dependence between a store and a call (a call has a MEM inside a SYMBOL_REF). */
7208 if (dep_type == REG_DEP_TRUE
7209 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
7210 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
7211 return 0;
7213 if (dw == MIN_DEP_WEAK)
7214 /* Store and load are likely to alias, use higher cost to avoid stall. */
7215 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
7216 else if (dw > MIN_DEP_WEAK)
7218 /* Store and load are less likely to alias. */
7219 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
7220 /* Assume there will be no cache conflict for floating-point data.
7221 For integer data, L1 conflict penalty is huge (17 cycles), so we
7222 never assume it will not cause a conflict. */
7223 return 0;
7224 else
7225 return cost;
7228 if (dep_type != REG_DEP_OUTPUT)
7229 return cost;
7231 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
7232 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
7233 return 0;
7235 return cost;
7238 /* Like emit_insn_before, but skip cycle_display notes.
7239 ??? When cycle display notes are implemented, update this. */
7241 static void
7242 ia64_emit_insn_before (rtx insn, rtx_insn *before)
7244 emit_insn_before (insn, before);
7247 /* The following function marks insns that produce addresses for load
7248 and store insns.  Such insns will be placed into M slots because this
7249 decreases the latency on Itanium 1 (see function
7250 `ia64_produce_address_p' and the DFA descriptions). */
7252 static void
7253 ia64_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
7255 rtx_insn *insn, *next, *next_tail;
7257 /* Before reload, which_alternative is not set, which means that
7258 ia64_safe_itanium_class will produce wrong results for (at least)
7259 move instructions. */
7260 if (!reload_completed)
7261 return;
7263 next_tail = NEXT_INSN (tail);
7264 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7265 if (INSN_P (insn))
7266 insn->call = 0;
7267 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7268 if (INSN_P (insn)
7269 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7271 sd_iterator_def sd_it;
7272 dep_t dep;
7273 bool has_mem_op_consumer_p = false;
7275 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7277 enum attr_itanium_class c;
7279 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7280 continue;
7282 next = DEP_CON (dep);
7283 c = ia64_safe_itanium_class (next);
7284 if ((c == ITANIUM_CLASS_ST
7285 || c == ITANIUM_CLASS_STF)
7286 && ia64_st_address_bypass_p (insn, next))
7288 has_mem_op_consumer_p = true;
7289 break;
7291 else if ((c == ITANIUM_CLASS_LD
7292 || c == ITANIUM_CLASS_FLD
7293 || c == ITANIUM_CLASS_FLDP)
7294 && ia64_ld_address_bypass_p (insn, next))
7296 has_mem_op_consumer_p = true;
7297 break;
7301 insn->call = has_mem_op_consumer_p;
7305 /* We're beginning a new block. Initialize data structures as necessary. */
7307 static void
7308 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7309 int sched_verbose ATTRIBUTE_UNUSED,
7310 int max_ready ATTRIBUTE_UNUSED)
7312 if (flag_checking && !sel_sched_p () && reload_completed)
7314 for (rtx_insn *insn = NEXT_INSN (current_sched_info->prev_head);
7315 insn != current_sched_info->next_tail;
7316 insn = NEXT_INSN (insn))
7317 gcc_assert (!SCHED_GROUP_P (insn));
7319 last_scheduled_insn = NULL;
7320 init_insn_group_barriers ();
7322 current_cycle = 0;
7323 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7326 /* We're beginning a scheduling pass. Check assertion. */
7328 static void
7329 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7330 int sched_verbose ATTRIBUTE_UNUSED,
7331 int max_ready ATTRIBUTE_UNUSED)
7333 gcc_assert (pending_data_specs == 0);
7336 /* Scheduling pass is now finished. Free/reset static variable. */
7337 static void
7338 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7339 int sched_verbose ATTRIBUTE_UNUSED)
7341 gcc_assert (pending_data_specs == 0);
7344 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7345 speculation check), FALSE otherwise. */
7346 static bool
7347 is_load_p (rtx_insn *insn)
7349 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7351 return
7352 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7353 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7356 /* If INSN is a memory reference, record it in the MEM_OPS_IN_GROUP global
7357 array (taking into account the 3-cycle cache reference postponing for
7358 stores: Intel Itanium 2 Reference Manual for Software Development and
7359 Optimization, 6.7.3.1). */
7360 static void
7361 record_memory_reference (rtx_insn *insn)
7363 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7365 switch (insn_class) {
7366 case ITANIUM_CLASS_FLD:
7367 case ITANIUM_CLASS_LD:
7368 mem_ops_in_group[current_cycle % 4]++;
7369 break;
7370 case ITANIUM_CLASS_STF:
7371 case ITANIUM_CLASS_ST:
7372 mem_ops_in_group[(current_cycle + 3) % 4]++;
7373 break;
7374 default:;
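/* For example, a store issued on cycle N is charged to
   mem_ops_in_group[(N + 3) % 4], i.e. it competes with the memory
   operations issued three cycles later; this is how the 3-cycle store
   postponement mentioned above is modelled.  */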
7378 /* We are about to begin issuing insns for this clock cycle.
7379 Override the default sort algorithm to better slot instructions. */
7381 static int
7382 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7383 int *pn_ready, int clock_var,
7384 int reorder_type)
7386 int n_asms;
7387 int n_ready = *pn_ready;
7388 rtx_insn **e_ready = ready + n_ready;
7389 rtx_insn **insnp;
7391 if (sched_verbose)
7392 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7394 if (reorder_type == 0)
7396 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7397 n_asms = 0;
7398 for (insnp = ready; insnp < e_ready; insnp++)
7399 if (insnp < e_ready)
7401 rtx_insn *insn = *insnp;
7402 enum attr_type t = ia64_safe_type (insn);
7403 if (t == TYPE_UNKNOWN)
7405 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7406 || asm_noperands (PATTERN (insn)) >= 0)
7408 rtx_insn *lowest = ready[n_asms];
7409 ready[n_asms] = insn;
7410 *insnp = lowest;
7411 n_asms++;
7413 else
7415 rtx_insn *highest = ready[n_ready - 1];
7416 ready[n_ready - 1] = insn;
7417 *insnp = highest;
7418 return 1;
7423 if (n_asms < n_ready)
7425 /* Some normal insns to process. Skip the asms. */
7426 ready += n_asms;
7427 n_ready -= n_asms;
7429 else if (n_ready > 0)
7430 return 1;
7433 if (ia64_final_schedule)
7435 int deleted = 0;
7436 int nr_need_stop = 0;
7438 for (insnp = ready; insnp < e_ready; insnp++)
7439 if (safe_group_barrier_needed (*insnp))
7440 nr_need_stop++;
7442 if (reorder_type == 1 && n_ready == nr_need_stop)
7443 return 0;
7444 if (reorder_type == 0)
7445 return 1;
7446 insnp = e_ready;
7447 /* Move down everything that needs a stop bit, preserving
7448 relative order. */
7449 while (insnp-- > ready + deleted)
7450 while (insnp >= ready + deleted)
7452 rtx_insn *insn = *insnp;
7453 if (! safe_group_barrier_needed (insn))
7454 break;
7455 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7456 *ready = insn;
7457 deleted++;
7459 n_ready -= deleted;
7460 ready += deleted;
7463 current_cycle = clock_var;
7464 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7466 int moved = 0;
7468 insnp = e_ready;
7469 /* Move down loads/stores, preserving relative order. */
7470 while (insnp-- > ready + moved)
7471 while (insnp >= ready + moved)
7473 rtx_insn *insn = *insnp;
7474 if (! is_load_p (insn))
7475 break;
7476 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7477 *ready = insn;
7478 moved++;
7480 n_ready -= moved;
7481 ready += moved;
7484 return 1;
7487 /* We are about to begin issuing insns for this clock cycle.  Override
7488 the default sort algorithm to better slot instructions. */
7490 static int
7491 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7492 int *pn_ready, int clock_var)
7494 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7495 pn_ready, clock_var, 0);
7498 /* Like ia64_sched_reorder, but called after issuing each insn.
7499 Override the default sort algorithm to better slot instructions. */
7501 static int
7502 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7503 int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
7504 int *pn_ready, int clock_var)
7506 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7507 clock_var, 1);
7510 /* We are about to issue INSN. Return the number of insns left on the
7511 ready queue that can be issued this cycle. */
7513 static int
7514 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7515 int sched_verbose ATTRIBUTE_UNUSED,
7516 rtx_insn *insn,
7517 int can_issue_more ATTRIBUTE_UNUSED)
7519 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7520 /* Modulo scheduling does not extend h_i_d when emitting
7521 new instructions.  Don't use h_i_d if we don't have to. */
7523 if (DONE_SPEC (insn) & BEGIN_DATA)
7524 pending_data_specs++;
7525 if (CHECK_SPEC (insn) & BEGIN_DATA)
7526 pending_data_specs--;
7529 if (DEBUG_INSN_P (insn))
7530 return 1;
7532 last_scheduled_insn = insn;
7533 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7534 if (reload_completed)
7536 int needed = group_barrier_needed (insn);
7538 gcc_assert (!needed);
7539 if (CALL_P (insn))
7540 init_insn_group_barriers ();
7541 stops_p [INSN_UID (insn)] = stop_before_p;
7542 stop_before_p = 0;
7544 record_memory_reference (insn);
7546 return 1;
7549 /* We are choosing insn from the ready queue. Return zero if INSN
7550 can be chosen. */
7552 static int
7553 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
7555 gcc_assert (insn && INSN_P (insn));
7557 /* The size of the ALAT is 32.  Since we perform conservative
7558 data speculation, we keep the ALAT half-empty. */
7559 if (pending_data_specs >= 16 && (TODO_SPEC (insn) & BEGIN_DATA))
7560 return ready_index == 0 ? -1 : 1;
7562 if (ready_index == 0)
7563 return 0;
7565 if ((!reload_completed
7566 || !safe_group_barrier_needed (insn))
7567 && (!mflag_sched_mem_insns_hard_limit
7568 || !is_load_p (insn)
7569 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns))
7570 return 0;
7572 return 1;
7575 /* The following variable value is a pseudo-insn used by the DFA insn
7576 scheduler to change the DFA state when the simulated clock is
7577 increased. */
7579 static rtx_insn *dfa_pre_cycle_insn;
7581 /* Returns 1 when a meaningful insn was scheduled between the last group
7582 barrier and LAST. */
7583 static int
7584 scheduled_good_insn (rtx_insn *last)
7586 if (last && recog_memoized (last) >= 0)
7587 return 1;
7589 for ( ;
7590 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7591 && !stops_p[INSN_UID (last)];
7592 last = PREV_INSN (last))
7593 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7594 the ebb we're scheduling. */
7595 if (INSN_P (last) && recog_memoized (last) >= 0)
7596 return 1;
7598 return 0;
7601 /* We are about to begin issuing INSN.  Return nonzero if we cannot
7602 issue it on the given cycle CLOCK, and return zero if we should not sort
7603 the ready queue on the next clock start. */
7605 static int
7606 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
7607 int clock, int *sort_p)
7609 gcc_assert (insn && INSN_P (insn));
7611 if (DEBUG_INSN_P (insn))
7612 return 0;
7614 /* When a group barrier is needed for insn, last_scheduled_insn
7615 should be set. */
7616 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7617 || last_scheduled_insn);
7619 if ((reload_completed
7620 && (safe_group_barrier_needed (insn)
7621 || (mflag_sched_stop_bits_after_every_cycle
7622 && last_clock != clock
7623 && last_scheduled_insn
7624 && scheduled_good_insn (last_scheduled_insn))))
7625 || (last_scheduled_insn
7626 && (CALL_P (last_scheduled_insn)
7627 || unknown_for_bundling_p (last_scheduled_insn))))
7629 init_insn_group_barriers ();
7631 if (verbose && dump)
7632 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7633 last_clock == clock ? " + cycle advance" : "");
7635 stop_before_p = 1;
7636 current_cycle = clock;
7637 mem_ops_in_group[current_cycle % 4] = 0;
7639 if (last_clock == clock)
7641 state_transition (curr_state, dfa_stop_insn);
7642 if (TARGET_EARLY_STOP_BITS)
7643 *sort_p = (last_scheduled_insn == NULL_RTX
7644 || ! CALL_P (last_scheduled_insn));
7645 else
7646 *sort_p = 0;
7647 return 1;
7650 if (last_scheduled_insn)
7652 if (unknown_for_bundling_p (last_scheduled_insn))
7653 state_reset (curr_state);
7654 else
7656 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7657 state_transition (curr_state, dfa_stop_insn);
7658 state_transition (curr_state, dfa_pre_cycle_insn);
7659 state_transition (curr_state, NULL);
7663 return 0;
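/* Editorial note on the state handling just above: when a stop bit has
   to be placed before INSN and the clock has advanced, the DFA state
   is not patched in place; it is rebuilt from prev_cycle_state by
   re-simulating the end of the previous cycle with the stop inserted:

       memcpy (curr_state, prev_cycle_state, dfa_state_size);
       state_transition (curr_state, dfa_stop_insn);       -- stop bit
       state_transition (curr_state, dfa_pre_cycle_insn);  -- pre-cycle pseudo insn
       state_transition (curr_state, NULL);                -- cycle advance

   (unless the last scheduled insn was an asm, in which case the state
   is simply reset).  */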
7666 /* Implement targetm.sched.h_i_d_extended hook.
7667 Extend internal data structures. */
7668 static void
7669 ia64_h_i_d_extended (void)
7671 if (stops_p != NULL)
7673 int new_clocks_length = get_max_uid () * 3 / 2;
7674 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7675 clocks_length = new_clocks_length;
7680 /* This structure describes the data used by the backend to guide scheduling.
7681 When the current scheduling point is switched, this data should be saved
7682 and restored later, if the scheduler returns to this point. */
7683 struct _ia64_sched_context
7685 state_t prev_cycle_state;
7686 rtx_insn *last_scheduled_insn;
7687 struct reg_write_state rws_sum[NUM_REGS];
7688 struct reg_write_state rws_insn[NUM_REGS];
7689 int first_instruction;
7690 int pending_data_specs;
7691 int current_cycle;
7692 char mem_ops_in_group[4];
7694 typedef struct _ia64_sched_context *ia64_sched_context_t;
7696 /* Allocates a scheduling context. */
7697 static void *
7698 ia64_alloc_sched_context (void)
7700 return xmalloc (sizeof (struct _ia64_sched_context));
7703 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7704 the global context otherwise. */
7705 static void
7706 ia64_init_sched_context (void *_sc, bool clean_p)
7708 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7710 sc->prev_cycle_state = xmalloc (dfa_state_size);
7711 if (clean_p)
7713 state_reset (sc->prev_cycle_state);
7714 sc->last_scheduled_insn = NULL;
7715 memset (sc->rws_sum, 0, sizeof (rws_sum));
7716 memset (sc->rws_insn, 0, sizeof (rws_insn));
7717 sc->first_instruction = 1;
7718 sc->pending_data_specs = 0;
7719 sc->current_cycle = 0;
7720 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7722 else
7724 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7725 sc->last_scheduled_insn = last_scheduled_insn;
7726 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7727 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7728 sc->first_instruction = first_instruction;
7729 sc->pending_data_specs = pending_data_specs;
7730 sc->current_cycle = current_cycle;
7731 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7735 /* Sets the global scheduling context to the one pointed to by _SC. */
7736 static void
7737 ia64_set_sched_context (void *_sc)
7739 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7741 gcc_assert (sc != NULL);
7743 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7744 last_scheduled_insn = sc->last_scheduled_insn;
7745 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7746 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7747 first_instruction = sc->first_instruction;
7748 pending_data_specs = sc->pending_data_specs;
7749 current_cycle = sc->current_cycle;
7750 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7753 /* Clears the data in the _SC scheduling context. */
7754 static void
7755 ia64_clear_sched_context (void *_sc)
7757 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7759 free (sc->prev_cycle_state);
7760 sc->prev_cycle_state = NULL;
7763 /* Frees the _SC scheduling context. */
7764 static void
7765 ia64_free_sched_context (void *_sc)
7767 gcc_assert (_sc != NULL);
7769 free (_sc);
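/* Illustrative lifecycle of the five context hooks above (editorial
   sketch; the real caller is the selective scheduler, outside this
   file):

     void *ctx = ia64_alloc_sched_context ();
     ia64_init_sched_context (ctx, true);    -- start from a clean state
     ... schedule at another point, then come back ...
     ia64_set_sched_context (ctx);           -- reinstall the saved globals
     ia64_clear_sched_context (ctx);         -- free ctx->prev_cycle_state
     ia64_free_sched_context (ctx);          -- free the context itself

   ia64_init_sched_context always allocates prev_cycle_state, so
   ia64_clear_sched_context must be called before the context is freed
   or that buffer leaks.  */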
7772 typedef rtx (* gen_func_t) (rtx, rtx);
7774 /* Return a function that will generate a load with mode index MODE_NO
7775 and speculation types TS. */
7776 static gen_func_t
7777 get_spec_load_gen_function (ds_t ts, int mode_no)
7779 static gen_func_t gen_ld_[] = {
7780 gen_movbi,
7781 gen_movqi_internal,
7782 gen_movhi_internal,
7783 gen_movsi_internal,
7784 gen_movdi_internal,
7785 gen_movsf_internal,
7786 gen_movdf_internal,
7787 gen_movxf_internal,
7788 gen_movti_internal,
7789 gen_zero_extendqidi2,
7790 gen_zero_extendhidi2,
7791 gen_zero_extendsidi2,
7794 static gen_func_t gen_ld_a[] = {
7795 gen_movbi_advanced,
7796 gen_movqi_advanced,
7797 gen_movhi_advanced,
7798 gen_movsi_advanced,
7799 gen_movdi_advanced,
7800 gen_movsf_advanced,
7801 gen_movdf_advanced,
7802 gen_movxf_advanced,
7803 gen_movti_advanced,
7804 gen_zero_extendqidi2_advanced,
7805 gen_zero_extendhidi2_advanced,
7806 gen_zero_extendsidi2_advanced,
7808 static gen_func_t gen_ld_s[] = {
7809 gen_movbi_speculative,
7810 gen_movqi_speculative,
7811 gen_movhi_speculative,
7812 gen_movsi_speculative,
7813 gen_movdi_speculative,
7814 gen_movsf_speculative,
7815 gen_movdf_speculative,
7816 gen_movxf_speculative,
7817 gen_movti_speculative,
7818 gen_zero_extendqidi2_speculative,
7819 gen_zero_extendhidi2_speculative,
7820 gen_zero_extendsidi2_speculative,
7822 static gen_func_t gen_ld_sa[] = {
7823 gen_movbi_speculative_advanced,
7824 gen_movqi_speculative_advanced,
7825 gen_movhi_speculative_advanced,
7826 gen_movsi_speculative_advanced,
7827 gen_movdi_speculative_advanced,
7828 gen_movsf_speculative_advanced,
7829 gen_movdf_speculative_advanced,
7830 gen_movxf_speculative_advanced,
7831 gen_movti_speculative_advanced,
7832 gen_zero_extendqidi2_speculative_advanced,
7833 gen_zero_extendhidi2_speculative_advanced,
7834 gen_zero_extendsidi2_speculative_advanced,
7836 static gen_func_t gen_ld_s_a[] = {
7837 gen_movbi_speculative_a,
7838 gen_movqi_speculative_a,
7839 gen_movhi_speculative_a,
7840 gen_movsi_speculative_a,
7841 gen_movdi_speculative_a,
7842 gen_movsf_speculative_a,
7843 gen_movdf_speculative_a,
7844 gen_movxf_speculative_a,
7845 gen_movti_speculative_a,
7846 gen_zero_extendqidi2_speculative_a,
7847 gen_zero_extendhidi2_speculative_a,
7848 gen_zero_extendsidi2_speculative_a,
7851 gen_func_t *gen_ld;
7853 if (ts & BEGIN_DATA)
7855 if (ts & BEGIN_CONTROL)
7856 gen_ld = gen_ld_sa;
7857 else
7858 gen_ld = gen_ld_a;
7860 else if (ts & BEGIN_CONTROL)
7862 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7863 || ia64_needs_block_p (ts))
7864 gen_ld = gen_ld_s;
7865 else
7866 gen_ld = gen_ld_s_a;
7868 else if (ts == 0)
7869 gen_ld = gen_ld_;
7870 else
7871 gcc_unreachable ();
7873 return gen_ld[mode_no];
7876 /* Constants that help map 'machine_mode' to int. */
7877 enum SPEC_MODES
7879 SPEC_MODE_INVALID = -1,
7880 SPEC_MODE_FIRST = 0,
7881 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7882 SPEC_MODE_FOR_EXTEND_LAST = 3,
7883 SPEC_MODE_LAST = 8
7886 enum
7888 /* Offset to reach ZERO_EXTEND patterns. */
7889 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7892 /* Return the index of MODE. */
7893 static int
7894 ia64_mode_to_int (machine_mode mode)
7896 switch (mode)
7898 case E_BImode: return 0; /* SPEC_MODE_FIRST */
7899 case E_QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7900 case E_HImode: return 2;
7901 case E_SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7902 case E_DImode: return 4;
7903 case E_SFmode: return 5;
7904 case E_DFmode: return 6;
7905 case E_XFmode: return 7;
7906 case E_TImode:
7907 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7908 mentioned in itanium[12].md. Predicate fp_register_operand also
7909 needs to be defined. Bottom line: better disable for now. */
7910 return SPEC_MODE_INVALID;
7911 default: return SPEC_MODE_INVALID;
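/* Worked example of the mapping (editorial note): a load whose MEM is
   SImode and whose destination is zero-extended to DImode yields
   ia64_mode_to_int (SImode) == 3; get_mode_no_for_insn below then adds
   SPEC_GEN_EXTEND_OFFSET == SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
   == 8 - 1 + 1 == 8, giving index 11, which selects the
   gen_zero_extendsidi2* entries in the generator tables of
   get_spec_load_gen_function above and get_spec_check_gen_function
   below.  */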
7915 /* Provide information about speculation capabilities. */
7916 static void
7917 ia64_set_sched_flags (spec_info_t spec_info)
7919 unsigned int *flags = &(current_sched_info->flags);
7921 if (*flags & SCHED_RGN
7922 || *flags & SCHED_EBB
7923 || *flags & SEL_SCHED)
7925 int mask = 0;
7927 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7928 || (mflag_sched_ar_data_spec && reload_completed))
7930 mask |= BEGIN_DATA;
7932 if (!sel_sched_p ()
7933 && ((mflag_sched_br_in_data_spec && !reload_completed)
7934 || (mflag_sched_ar_in_data_spec && reload_completed)))
7935 mask |= BE_IN_DATA;
7938 if (mflag_sched_control_spec
7939 && (!sel_sched_p ()
7940 || reload_completed))
7942 mask |= BEGIN_CONTROL;
7944 if (!sel_sched_p () && mflag_sched_in_control_spec)
7945 mask |= BE_IN_CONTROL;
7948 spec_info->mask = mask;
7950 if (mask)
7952 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7954 if (mask & BE_IN_SPEC)
7955 *flags |= NEW_BBS;
7957 spec_info->flags = 0;
7959 if ((mask & CONTROL_SPEC)
7960 && sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7961 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7963 if (sched_verbose >= 1)
7964 spec_info->dump = sched_dump;
7965 else
7966 spec_info->dump = 0;
7968 if (mflag_sched_count_spec_in_critical_path)
7969 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7972 else
7973 spec_info->mask = 0;
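/* Editorial summary of the mask computed above: BEGIN_DATA is enabled
   by mflag_sched_br_data_spec before reload (when optimizing) and by
   mflag_sched_ar_data_spec after reload; BEGIN_CONTROL by
   mflag_sched_control_spec; the BE_IN_DATA / BE_IN_CONTROL bits are
   only ever set for the non-selective schedulers.  Any nonzero mask
   also turns on USE_DEPS_LIST and DO_SPECULATION in the common
   scheduler flags.  */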
7976 /* If INSN is an appropriate load, return its mode index (see
7977 ia64_mode_to_int). Return -1 otherwise. */
7978 static int
7979 get_mode_no_for_insn (rtx_insn *insn)
7981 rtx reg, mem, mode_rtx;
7982 int mode_no;
7983 bool extend_p;
7985 extract_insn_cached (insn);
7987 /* We use WHICH_ALTERNATIVE only after reload. This will
7988 guarantee that reload won't touch a speculative insn. */
7990 if (recog_data.n_operands != 2)
7991 return -1;
7993 reg = recog_data.operand[0];
7994 mem = recog_data.operand[1];
7996 /* We should use MEM's mode since REG's mode in the presence of
7997 ZERO_EXTEND will always be DImode. */
7998 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7999 /* Process non-speculative ld. */
8001 if (!reload_completed)
8003 /* Do not speculate into regs like ar.lc. */
8004 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
8005 return -1;
8007 if (!MEM_P (mem))
8008 return -1;
8011 rtx mem_reg = XEXP (mem, 0);
8013 if (!REG_P (mem_reg))
8014 return -1;
8017 mode_rtx = mem;
8019 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
8021 gcc_assert (REG_P (reg) && MEM_P (mem));
8022 mode_rtx = mem;
8024 else
8025 return -1;
8027 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
8028 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
8029 || get_attr_check_load (insn) == CHECK_LOAD_YES)
8030 /* Process speculative ld or ld.c. */
8032 gcc_assert (REG_P (reg) && MEM_P (mem));
8033 mode_rtx = mem;
8035 else
8037 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
8039 if (attr_class == ITANIUM_CLASS_CHK_A
8040 || attr_class == ITANIUM_CLASS_CHK_S_I
8041 || attr_class == ITANIUM_CLASS_CHK_S_F)
8042 /* Process chk. */
8043 mode_rtx = reg;
8044 else
8045 return -1;
8048 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
8050 if (mode_no == SPEC_MODE_INVALID)
8051 return -1;
8053 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
8055 if (extend_p)
8057 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
8058 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
8059 return -1;
8061 mode_no += SPEC_GEN_EXTEND_OFFSET;
8064 return mode_no;
8067 /* If X is an unspec part of a speculative load, return its code.
8068 Return -1 otherwise. */
8069 static int
8070 get_spec_unspec_code (const_rtx x)
8072 if (GET_CODE (x) != UNSPEC)
8073 return -1;
8076 int code;
8078 code = XINT (x, 1);
8080 switch (code)
8082 case UNSPEC_LDA:
8083 case UNSPEC_LDS:
8084 case UNSPEC_LDS_A:
8085 case UNSPEC_LDSA:
8086 return code;
8088 default:
8089 return -1;
8094 /* Implement skip_rtx_p hook. */
8095 static bool
8096 ia64_skip_rtx_p (const_rtx x)
8098 return get_spec_unspec_code (x) != -1;
8101 /* If INSN is a speculative load, return its UNSPEC code.
8102 Return -1 otherwise. */
8103 static int
8104 get_insn_spec_code (const_rtx insn)
8106 rtx pat, reg, mem;
8108 pat = PATTERN (insn);
8110 if (GET_CODE (pat) == COND_EXEC)
8111 pat = COND_EXEC_CODE (pat);
8113 if (GET_CODE (pat) != SET)
8114 return -1;
8116 reg = SET_DEST (pat);
8117 if (!REG_P (reg))
8118 return -1;
8120 mem = SET_SRC (pat);
8121 if (GET_CODE (mem) == ZERO_EXTEND)
8122 mem = XEXP (mem, 0);
8124 return get_spec_unspec_code (mem);
8127 /* If INSN is a speculative load, return a ds with the speculation types.
8128 Otherwise [if INSN is a normal instruction] return 0. */
8129 static ds_t
8130 ia64_get_insn_spec_ds (rtx_insn *insn)
8132 int code = get_insn_spec_code (insn);
8134 switch (code)
8136 case UNSPEC_LDA:
8137 return BEGIN_DATA;
8139 case UNSPEC_LDS:
8140 case UNSPEC_LDS_A:
8141 return BEGIN_CONTROL;
8143 case UNSPEC_LDSA:
8144 return BEGIN_DATA | BEGIN_CONTROL;
8146 default:
8147 return 0;
8151 /* If INSN is a speculative load return a ds with the speculation types that
8152 will be checked.
8153 Otherwise [if INSN is a normal instruction] return 0. */
8154 static ds_t
8155 ia64_get_insn_checked_ds (rtx_insn *insn)
8157 int code = get_insn_spec_code (insn);
8159 switch (code)
8161 case UNSPEC_LDA:
8162 return BEGIN_DATA | BEGIN_CONTROL;
8164 case UNSPEC_LDS:
8165 return BEGIN_CONTROL;
8167 case UNSPEC_LDS_A:
8168 case UNSPEC_LDSA:
8169 return BEGIN_DATA | BEGIN_CONTROL;
8171 default:
8172 return 0;
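/* Editorial summary of the two mappings above:

     unspec         ds produced                  ds checked
     UNSPEC_LDA     BEGIN_DATA                   BEGIN_DATA | BEGIN_CONTROL
     UNSPEC_LDS     BEGIN_CONTROL                BEGIN_CONTROL
     UNSPEC_LDS_A   BEGIN_CONTROL                BEGIN_DATA | BEGIN_CONTROL
     UNSPEC_LDSA    BEGIN_DATA | BEGIN_CONTROL   BEGIN_DATA | BEGIN_CONTROL

   where the "checked" column is what the check insn generated for that
   load is able to detect.  */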
8176 /* Return a speculative load pattern for INSN, using the generator
8177 selected by the speculation types TS and the mode index MODE_NO
8178 (see get_spec_load_gen_function). The predicate of a COND_EXEC
8179 INSN is preserved on the new pattern. */
8180 static rtx
8181 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
8183 rtx pat, new_pat;
8184 gen_func_t gen_load;
8186 gen_load = get_spec_load_gen_function (ts, mode_no);
8188 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
8189 copy_rtx (recog_data.operand[1]));
8191 pat = PATTERN (insn);
8192 if (GET_CODE (pat) == COND_EXEC)
8193 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8194 new_pat);
8196 return new_pat;
8199 static bool
8200 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
8201 ds_t ds ATTRIBUTE_UNUSED)
8203 return false;
8206 /* Implement targetm.sched.speculate_insn hook.
8207 Check if the INSN can be TS speculative.
8208 If 'no' - return -1.
8209 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
8210 If current pattern of the INSN already provides TS speculation,
8211 return 0. */
8212 static int
8213 ia64_speculate_insn (rtx_insn *insn, ds_t ts, rtx *new_pat)
8215 int mode_no;
8216 int res;
8218 gcc_assert (!(ts & ~SPECULATIVE));
8220 if (ia64_spec_check_p (insn))
8221 return -1;
8223 if ((ts & BE_IN_SPEC)
8224 && !insn_can_be_in_speculative_p (insn, ts))
8225 return -1;
8227 mode_no = get_mode_no_for_insn (insn);
8229 if (mode_no != SPEC_MODE_INVALID)
8231 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
8232 res = 0;
8233 else
8235 res = 1;
8236 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
8239 else
8240 res = -1;
8242 return res;
8245 /* Return a function that will generate a check for speculation TS with mode
8246 MODE_NO.
8247 If simple check is needed, pass true for SIMPLE_CHECK_P.
8248 If clearing check is needed, pass true for CLEARING_CHECK_P. */
8249 static gen_func_t
8250 get_spec_check_gen_function (ds_t ts, int mode_no,
8251 bool simple_check_p, bool clearing_check_p)
8253 static gen_func_t gen_ld_c_clr[] = {
8254 gen_movbi_clr,
8255 gen_movqi_clr,
8256 gen_movhi_clr,
8257 gen_movsi_clr,
8258 gen_movdi_clr,
8259 gen_movsf_clr,
8260 gen_movdf_clr,
8261 gen_movxf_clr,
8262 gen_movti_clr,
8263 gen_zero_extendqidi2_clr,
8264 gen_zero_extendhidi2_clr,
8265 gen_zero_extendsidi2_clr,
8267 static gen_func_t gen_ld_c_nc[] = {
8268 gen_movbi_nc,
8269 gen_movqi_nc,
8270 gen_movhi_nc,
8271 gen_movsi_nc,
8272 gen_movdi_nc,
8273 gen_movsf_nc,
8274 gen_movdf_nc,
8275 gen_movxf_nc,
8276 gen_movti_nc,
8277 gen_zero_extendqidi2_nc,
8278 gen_zero_extendhidi2_nc,
8279 gen_zero_extendsidi2_nc,
8281 static gen_func_t gen_chk_a_clr[] = {
8282 gen_advanced_load_check_clr_bi,
8283 gen_advanced_load_check_clr_qi,
8284 gen_advanced_load_check_clr_hi,
8285 gen_advanced_load_check_clr_si,
8286 gen_advanced_load_check_clr_di,
8287 gen_advanced_load_check_clr_sf,
8288 gen_advanced_load_check_clr_df,
8289 gen_advanced_load_check_clr_xf,
8290 gen_advanced_load_check_clr_ti,
8291 gen_advanced_load_check_clr_di,
8292 gen_advanced_load_check_clr_di,
8293 gen_advanced_load_check_clr_di,
8295 static gen_func_t gen_chk_a_nc[] = {
8296 gen_advanced_load_check_nc_bi,
8297 gen_advanced_load_check_nc_qi,
8298 gen_advanced_load_check_nc_hi,
8299 gen_advanced_load_check_nc_si,
8300 gen_advanced_load_check_nc_di,
8301 gen_advanced_load_check_nc_sf,
8302 gen_advanced_load_check_nc_df,
8303 gen_advanced_load_check_nc_xf,
8304 gen_advanced_load_check_nc_ti,
8305 gen_advanced_load_check_nc_di,
8306 gen_advanced_load_check_nc_di,
8307 gen_advanced_load_check_nc_di,
8309 static gen_func_t gen_chk_s[] = {
8310 gen_speculation_check_bi,
8311 gen_speculation_check_qi,
8312 gen_speculation_check_hi,
8313 gen_speculation_check_si,
8314 gen_speculation_check_di,
8315 gen_speculation_check_sf,
8316 gen_speculation_check_df,
8317 gen_speculation_check_xf,
8318 gen_speculation_check_ti,
8319 gen_speculation_check_di,
8320 gen_speculation_check_di,
8321 gen_speculation_check_di,
8324 gen_func_t *gen_check;
8326 if (ts & BEGIN_DATA)
8328 /* We don't need recovery because, even if this is an ld.sa, the
8329 ALAT entry will be allocated only if the NAT bit is zero.
8330 So it is enough to use ld.c here. */
8332 if (simple_check_p)
8334 gcc_assert (mflag_sched_spec_ldc);
8336 if (clearing_check_p)
8337 gen_check = gen_ld_c_clr;
8338 else
8339 gen_check = gen_ld_c_nc;
8341 else
8343 if (clearing_check_p)
8344 gen_check = gen_chk_a_clr;
8345 else
8346 gen_check = gen_chk_a_nc;
8349 else if (ts & BEGIN_CONTROL)
8351 if (simple_check_p)
8352 /* We might want to use ld.sa -> ld.c instead of
8353 ld.s -> chk.s. */
8355 gcc_assert (!ia64_needs_block_p (ts));
8357 if (clearing_check_p)
8358 gen_check = gen_ld_c_clr;
8359 else
8360 gen_check = gen_ld_c_nc;
8362 else
8364 gen_check = gen_chk_s;
8367 else
8368 gcc_unreachable ();
8370 gcc_assert (mode_no >= 0);
8371 return gen_check[mode_no];
8374 /* Return nonzero, if INSN needs branchy recovery check. */
8375 static bool
8376 ia64_needs_block_p (ds_t ts)
8378 if (ts & BEGIN_DATA)
8379 return !mflag_sched_spec_ldc;
8381 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8383 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
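/* In other words (editorial note): a branchy chk.a/chk.s recovery
   block is only required when the simple ld.c form is unavailable --
   mflag_sched_spec_ldc enables ld.c checks for data speculation, and
   both it and mflag_sched_spec_control_ldc must be set before control
   speculation can be checked with ld.c as well.  */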
8386 /* Generate (or regenerate) a recovery check for INSN. */
8387 static rtx
8388 ia64_gen_spec_check (rtx_insn *insn, rtx_insn *label, ds_t ds)
8390 rtx op1, pat, check_pat;
8391 gen_func_t gen_check;
8392 int mode_no;
8394 mode_no = get_mode_no_for_insn (insn);
8395 gcc_assert (mode_no >= 0);
8397 if (label)
8398 op1 = label;
8399 else
8401 gcc_assert (!ia64_needs_block_p (ds));
8402 op1 = copy_rtx (recog_data.operand[1]);
8405 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8406 true);
8408 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8410 pat = PATTERN (insn);
8411 if (GET_CODE (pat) == COND_EXEC)
8412 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8413 check_pat);
8415 return check_pat;
8418 /* Return nonzero, if X is branchy recovery check. */
8419 static int
8420 ia64_spec_check_p (rtx x)
8422 x = PATTERN (x);
8423 if (GET_CODE (x) == COND_EXEC)
8424 x = COND_EXEC_CODE (x);
8425 if (GET_CODE (x) == SET)
8426 return ia64_spec_check_src_p (SET_SRC (x));
8427 return 0;
8430 /* Return nonzero, if SRC belongs to recovery check. */
8431 static int
8432 ia64_spec_check_src_p (rtx src)
8434 if (GET_CODE (src) == IF_THEN_ELSE)
8436 rtx t;
8438 t = XEXP (src, 0);
8439 if (GET_CODE (t) == NE)
8441 t = XEXP (t, 0);
8443 if (GET_CODE (t) == UNSPEC)
8445 int code;
8447 code = XINT (t, 1);
8449 if (code == UNSPEC_LDCCLR
8450 || code == UNSPEC_LDCNC
8451 || code == UNSPEC_CHKACLR
8452 || code == UNSPEC_CHKANC
8453 || code == UNSPEC_CHKS)
8455 gcc_assert (code != 0);
8456 return code;
8461 return 0;
8465 /* The following page contains abstract data `bundle states' which are
8466 used for bundling insns (inserting nops and template generation). */
8468 /* The following describes the state of insn bundling. */
8470 struct bundle_state
8472 /* Unique bundle state number to identify them in the debugging
8473 output */
8474 int unique_num;
8475 rtx_insn *insn; /* corresponding insn, NULL for the 1st and the last state */
8476 /* number of nops before and after the insn */
8477 short before_nops_num, after_nops_num;
8478 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
8479 insn) */
8480 int cost; /* cost of the state in cycles */
8481 int accumulated_insns_num; /* number of all previous insns including
8482 nops; an L insn is counted as 2 insns */
8483 int branch_deviation; /* deviation of previous branches from 3rd slots */
8484 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8485 struct bundle_state *next; /* next state with the same insn_num */
8486 struct bundle_state *originator; /* originator (previous insn state) */
8487 /* All bundle states are in the following chain. */
8488 struct bundle_state *allocated_states_chain;
8489 /* The DFA State after issuing the insn and the nops. */
8490 state_t dfa_state;
8493 /* The following maps an insn number to the corresponding bundle state. */
8495 static struct bundle_state **index_to_bundle_states;
8497 /* The unique number of next bundle state. */
8499 static int bundle_states_num;
8501 /* All allocated bundle states are in the following chain. */
8503 static struct bundle_state *allocated_bundle_states_chain;
8505 /* All allocated but not used bundle states are in the following
8506 chain. */
8508 static struct bundle_state *free_bundle_state_chain;
8511 /* The following function returns a free bundle state. */
8513 static struct bundle_state *
8514 get_free_bundle_state (void)
8516 struct bundle_state *result;
8518 if (free_bundle_state_chain != NULL)
8520 result = free_bundle_state_chain;
8521 free_bundle_state_chain = result->next;
8523 else
8525 result = XNEW (struct bundle_state);
8526 result->dfa_state = xmalloc (dfa_state_size);
8527 result->allocated_states_chain = allocated_bundle_states_chain;
8528 allocated_bundle_states_chain = result;
8530 result->unique_num = bundle_states_num++;
8531 return result;
8535 /* The following function frees given bundle state. */
8537 static void
8538 free_bundle_state (struct bundle_state *state)
8540 state->next = free_bundle_state_chain;
8541 free_bundle_state_chain = state;
8544 /* Start work with abstract data `bundle states'. */
8546 static void
8547 initiate_bundle_states (void)
8549 bundle_states_num = 0;
8550 free_bundle_state_chain = NULL;
8551 allocated_bundle_states_chain = NULL;
8554 /* Finish work with abstract data `bundle states'. */
8556 static void
8557 finish_bundle_states (void)
8559 struct bundle_state *curr_state, *next_state;
8561 for (curr_state = allocated_bundle_states_chain;
8562 curr_state != NULL;
8563 curr_state = next_state)
8565 next_state = curr_state->allocated_states_chain;
8566 free (curr_state->dfa_state);
8567 free (curr_state);
8571 /* Hashtable helpers. */
8573 struct bundle_state_hasher : nofree_ptr_hash <bundle_state>
8575 static inline hashval_t hash (const bundle_state *);
8576 static inline bool equal (const bundle_state *, const bundle_state *);
8579 /* The function returns the hash of BUNDLE_STATE. */
8581 inline hashval_t
8582 bundle_state_hasher::hash (const bundle_state *state)
8584 unsigned result, i;
8586 for (result = i = 0; i < dfa_state_size; i++)
8587 result += (((unsigned char *) state->dfa_state) [i]
8588 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8589 return result + state->insn_num;
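/* A standalone sketch of the same mixing scheme (editorial example,
   not used by the port): every byte of the DFA state is shifted left
   by CHAR_BIT plus three bits per byte position (mod CHAR_BIT), and
   the insn number is folded in at the end, so two states that differ
   only in insn_num still hash differently.  */
#if 0
static unsigned
bundle_hash_sketch (const unsigned char *dfa_bytes, size_t size, int insn_num)
{
  unsigned result = 0;
  for (size_t i = 0; i < size; i++)
    result += (unsigned) dfa_bytes[i] << ((i % CHAR_BIT) * 3 + CHAR_BIT);
  return result + insn_num;
}
#endif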
8592 /* The function returns nonzero if the bundle state keys are equal. */
8594 inline bool
8595 bundle_state_hasher::equal (const bundle_state *state1,
8596 const bundle_state *state2)
8598 return (state1->insn_num == state2->insn_num
8599 && memcmp (state1->dfa_state, state2->dfa_state,
8600 dfa_state_size) == 0);
8603 /* Hash table of the bundle states. The key is dfa_state and insn_num
8604 of the bundle states. */
8606 static hash_table<bundle_state_hasher> *bundle_state_table;
8608 /* The function inserts BUNDLE_STATE into the hash table. It
8609 returns nonzero if the bundle state has been inserted into the
8610 table. The table keeps the best bundle state for each key. */
8612 static int
8613 insert_bundle_state (struct bundle_state *bundle_state)
8615 struct bundle_state **entry_ptr;
8617 entry_ptr = bundle_state_table->find_slot (bundle_state, INSERT);
8618 if (*entry_ptr == NULL)
8620 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8621 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8622 *entry_ptr = bundle_state;
8623 return TRUE;
8625 else if (bundle_state->cost < (*entry_ptr)->cost
8626 || (bundle_state->cost == (*entry_ptr)->cost
8627 && ((*entry_ptr)->accumulated_insns_num
8628 > bundle_state->accumulated_insns_num
8629 || ((*entry_ptr)->accumulated_insns_num
8630 == bundle_state->accumulated_insns_num
8631 && ((*entry_ptr)->branch_deviation
8632 > bundle_state->branch_deviation
8633 || ((*entry_ptr)->branch_deviation
8634 == bundle_state->branch_deviation
8635 && (*entry_ptr)->middle_bundle_stops
8636 > bundle_state->middle_bundle_stops))))))
8639 struct bundle_state temp;
8641 temp = **entry_ptr;
8642 **entry_ptr = *bundle_state;
8643 (*entry_ptr)->next = temp.next;
8644 *bundle_state = temp;
8646 return FALSE;
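/* Editorial note on the replacement above: states with the same key
   are compared lexicographically by (cost, accumulated_insns_num,
   branch_deviation, middle_bundle_stops), lower being better in every
   component.  When the new state wins, its contents are swapped into
   the existing table entry rather than replacing the pointer, so the
   entry keeps its NEXT link and therefore its place in the
   index_to_bundle_states chain.  */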
8649 /* Start work with the hash table. */
8651 static void
8652 initiate_bundle_state_table (void)
8654 bundle_state_table = new hash_table<bundle_state_hasher> (50);
8657 /* Finish work with the hash table. */
8659 static void
8660 finish_bundle_state_table (void)
8662 delete bundle_state_table;
8663 bundle_state_table = NULL;
8668 /* The following variable is an insn `nop' used to check bundle states
8669 with different numbers of inserted nops. */
8671 static rtx_insn *ia64_nop;
8673 /* The following function tries to issue NOPS_NUM nops for the current
8674 state without advancing the processor cycle. If it fails, the
8675 function returns FALSE and frees the current state. */
8677 static int
8678 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8680 int i;
8682 for (i = 0; i < nops_num; i++)
8683 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8685 free_bundle_state (curr_state);
8686 return FALSE;
8688 return TRUE;
8691 /* The following function tries to issue INSN for the current
8692 state without advancing the processor cycle. If it fails, the
8693 function returns FALSE and frees the current state. */
8695 static int
8696 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8698 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8700 free_bundle_state (curr_state);
8701 return FALSE;
8703 return TRUE;
8706 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8707 starting from ORIGINATOR without advancing the processor cycle. If
8708 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8709 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8710 If it is successful, the function creates a new bundle state and
8711 inserts it into the hash table and into `index_to_bundle_states'. */
8713 static void
8714 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8715 rtx_insn *insn, int try_bundle_end_p,
8716 int only_bundle_end_p)
8718 struct bundle_state *curr_state;
8720 curr_state = get_free_bundle_state ();
8721 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8722 curr_state->insn = insn;
8723 curr_state->insn_num = originator->insn_num + 1;
8724 curr_state->cost = originator->cost;
8725 curr_state->originator = originator;
8726 curr_state->before_nops_num = before_nops_num;
8727 curr_state->after_nops_num = 0;
8728 curr_state->accumulated_insns_num
8729 = originator->accumulated_insns_num + before_nops_num;
8730 curr_state->branch_deviation = originator->branch_deviation;
8731 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8732 gcc_assert (insn);
8733 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8735 gcc_assert (GET_MODE (insn) != TImode);
8736 if (!try_issue_nops (curr_state, before_nops_num))
8737 return;
8738 if (!try_issue_insn (curr_state, insn))
8739 return;
8740 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8741 if (curr_state->accumulated_insns_num % 3 != 0)
8742 curr_state->middle_bundle_stops++;
8743 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8744 && curr_state->accumulated_insns_num % 3 != 0)
8746 free_bundle_state (curr_state);
8747 return;
8750 else if (GET_MODE (insn) != TImode)
8752 if (!try_issue_nops (curr_state, before_nops_num))
8753 return;
8754 if (!try_issue_insn (curr_state, insn))
8755 return;
8756 curr_state->accumulated_insns_num++;
8757 gcc_assert (!unknown_for_bundling_p (insn));
8759 if (ia64_safe_type (insn) == TYPE_L)
8760 curr_state->accumulated_insns_num++;
8762 else
8764 /* If this is an insn that must be first in a group, then don't allow
8765 nops to be emitted before it. Currently, alloc is the only such
8766 supported instruction. */
8767 /* ??? The bundling automatons should handle this for us, but they do
8768 not yet have support for the first_insn attribute. */
8769 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8771 free_bundle_state (curr_state);
8772 return;
8775 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8776 state_transition (curr_state->dfa_state, NULL);
8777 curr_state->cost++;
8778 if (!try_issue_nops (curr_state, before_nops_num))
8779 return;
8780 if (!try_issue_insn (curr_state, insn))
8781 return;
8782 curr_state->accumulated_insns_num++;
8783 if (unknown_for_bundling_p (insn))
8785 /* Finish bundle containing asm insn. */
8786 curr_state->after_nops_num
8787 = 3 - curr_state->accumulated_insns_num % 3;
8788 curr_state->accumulated_insns_num
8789 += 3 - curr_state->accumulated_insns_num % 3;
8791 else if (ia64_safe_type (insn) == TYPE_L)
8792 curr_state->accumulated_insns_num++;
8794 if (ia64_safe_type (insn) == TYPE_B)
8795 curr_state->branch_deviation
8796 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8797 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8799 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8801 state_t dfa_state;
8802 struct bundle_state *curr_state1;
8803 struct bundle_state *allocated_states_chain;
8805 curr_state1 = get_free_bundle_state ();
8806 dfa_state = curr_state1->dfa_state;
8807 allocated_states_chain = curr_state1->allocated_states_chain;
8808 *curr_state1 = *curr_state;
8809 curr_state1->dfa_state = dfa_state;
8810 curr_state1->allocated_states_chain = allocated_states_chain;
8811 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8812 dfa_state_size);
8813 curr_state = curr_state1;
8815 if (!try_issue_nops (curr_state,
8816 3 - curr_state->accumulated_insns_num % 3))
8817 return;
8818 curr_state->after_nops_num
8819 = 3 - curr_state->accumulated_insns_num % 3;
8820 curr_state->accumulated_insns_num
8821 += 3 - curr_state->accumulated_insns_num % 3;
8823 if (!insert_bundle_state (curr_state))
8824 free_bundle_state (curr_state);
8825 return;
8828 /* The following function returns the position within the two-bundle
8829 window for the given STATE. */
8831 static int
8832 get_max_pos (state_t state)
8834 if (cpu_unit_reservation_p (state, pos_6))
8835 return 6;
8836 else if (cpu_unit_reservation_p (state, pos_5))
8837 return 5;
8838 else if (cpu_unit_reservation_p (state, pos_4))
8839 return 4;
8840 else if (cpu_unit_reservation_p (state, pos_3))
8841 return 3;
8842 else if (cpu_unit_reservation_p (state, pos_2))
8843 return 2;
8844 else if (cpu_unit_reservation_p (state, pos_1))
8845 return 1;
8846 else
8847 return 0;
8850 /* The function returns the code of a possible template for the given
8851 position and state. It should be called only with a position equal
8852 to 3 or 6. We avoid generating F NOPs by putting
8853 templates containing F insns at the end of the template search,
8854 because of an undocumented anomaly in McKinley-derived cores which can
8855 cause stalls if an F-unit insn (including a NOP) is issued within a
8856 six-cycle window after reading certain application registers (such
8857 as ar.bsp). Furthermore, power considerations also argue against
8858 the use of F-unit instructions unless they're really needed. */
8860 static int
8861 get_template (state_t state, int pos)
8863 switch (pos)
8865 case 3:
8866 if (cpu_unit_reservation_p (state, _0mmi_))
8867 return 1;
8868 else if (cpu_unit_reservation_p (state, _0mii_))
8869 return 0;
8870 else if (cpu_unit_reservation_p (state, _0mmb_))
8871 return 7;
8872 else if (cpu_unit_reservation_p (state, _0mib_))
8873 return 6;
8874 else if (cpu_unit_reservation_p (state, _0mbb_))
8875 return 5;
8876 else if (cpu_unit_reservation_p (state, _0bbb_))
8877 return 4;
8878 else if (cpu_unit_reservation_p (state, _0mmf_))
8879 return 3;
8880 else if (cpu_unit_reservation_p (state, _0mfi_))
8881 return 2;
8882 else if (cpu_unit_reservation_p (state, _0mfb_))
8883 return 8;
8884 else if (cpu_unit_reservation_p (state, _0mlx_))
8885 return 9;
8886 else
8887 gcc_unreachable ();
8888 case 6:
8889 if (cpu_unit_reservation_p (state, _1mmi_))
8890 return 1;
8891 else if (cpu_unit_reservation_p (state, _1mii_))
8892 return 0;
8893 else if (cpu_unit_reservation_p (state, _1mmb_))
8894 return 7;
8895 else if (cpu_unit_reservation_p (state, _1mib_))
8896 return 6;
8897 else if (cpu_unit_reservation_p (state, _1mbb_))
8898 return 5;
8899 else if (cpu_unit_reservation_p (state, _1bbb_))
8900 return 4;
8901 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8902 return 3;
8903 else if (cpu_unit_reservation_p (state, _1mfi_))
8904 return 2;
8905 else if (cpu_unit_reservation_p (state, _1mfb_))
8906 return 8;
8907 else if (cpu_unit_reservation_p (state, _1mlx_))
8908 return 9;
8909 else
8910 gcc_unreachable ();
8911 default:
8912 gcc_unreachable ();
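/* Editorial note: the codes returned above follow the bundle selector
   numbering used by this port -- 0 .mii, 1 .mmi, 2 .mfi, 3 .mmf,
   4 .bbb, 5 .mbb, 6 .mib, 7 .mmb, 8 .mfb, 9 .mlx -- which is why
   ia64_add_bundle_selector_before below special-cases templates 4 and
   5, the bundles in which a br.call can sit before the final slot.  */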
8916 /* True when INSN is important for bundling. */
8918 static bool
8919 important_for_bundling_p (rtx_insn *insn)
8921 return (INSN_P (insn)
8922 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8923 && GET_CODE (PATTERN (insn)) != USE
8924 && GET_CODE (PATTERN (insn)) != CLOBBER);
8927 /* The following function returns an insn important for insn bundling
8928 followed by INSN and before TAIL. */
8930 static rtx_insn *
8931 get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
8933 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8934 if (important_for_bundling_p (insn))
8935 return insn;
8936 return NULL;
8939 /* True when INSN is unknown, but important, for bundling. */
8941 static bool
8942 unknown_for_bundling_p (rtx_insn *insn)
8944 return (INSN_P (insn)
8945 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
8946 && GET_CODE (PATTERN (insn)) != USE
8947 && GET_CODE (PATTERN (insn)) != CLOBBER);
8950 /* Add a bundle selector TEMPLATE0 before INSN. */
8952 static void
8953 ia64_add_bundle_selector_before (int template0, rtx_insn *insn)
8955 rtx b = gen_bundle_selector (GEN_INT (template0));
8957 ia64_emit_insn_before (b, insn);
8958 #if NR_BUNDLES == 10
8959 if ((template0 == 4 || template0 == 5)
8960 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8962 int i;
8963 rtx note = NULL_RTX;
8965 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
8966 first or second slot. If it is and has a REG_EH_REGION note, copy
8967 the note to the following nops, as br.call sets rp to the address
8968 of the following bundle and therefore an EH region end must be on a
8969 bundle boundary. */
8970 insn = PREV_INSN (insn);
8971 for (i = 0; i < 3; i++)
8974 insn = next_active_insn (insn);
8975 while (NONJUMP_INSN_P (insn)
8976 && get_attr_empty (insn) == EMPTY_YES);
8977 if (CALL_P (insn))
8978 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8979 else if (note)
8981 int code;
8983 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8984 || code == CODE_FOR_nop_b);
8985 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8986 note = NULL_RTX;
8987 else
8988 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8992 #endif
8995 /* The following function does insn bundling. Bundling means
8996 inserting templates and nop insns to fit insn groups into permitted
8997 templates. Instruction scheduling uses an NDFA (non-deterministic
8998 finite automaton) encoding information about the templates and the
8999 inserted nops. The nondeterminism of the automaton permits following
9000 all possible insn sequences very quickly.
9002 Unfortunately it is not possible to get information about the inserted
9003 nop insns and the used templates from the automaton states. The
9004 automaton only says that we can issue an insn, possibly inserting
9005 some nops before it and using some template. Therefore insn
9006 bundling in this function is implemented by using a DFA
9007 (deterministic finite automaton). We follow all possible insn
9008 sequences by inserting 0-2 nops (that is what the NDFA describes for
9009 insn scheduling) before/after each insn being bundled. We know the
9010 start of a simulated processor cycle from insn scheduling (an insn
9011 starting a new cycle has TImode).
9013 A simple implementation of insn bundling would create an enormous
9014 number of possible insn sequences satisfying the information about new
9015 cycle ticks taken from insn scheduling. To make the algorithm
9016 practical we use dynamic programming. Each decision (about
9017 inserting nops, and implicitly about previous decisions) is described
9018 by structure bundle_state (see above). If we generate the same
9019 bundle state (the key is the automaton state after issuing the insns
9020 and nops for it), we reuse the already generated one. As a consequence
9021 we reject some decisions which cannot improve the solution and we
9022 reduce the memory needed by the algorithm.
9024 When we reach the end of the EBB (extended basic block), we choose the
9025 best sequence and then, moving back through the EBB, insert templates
9026 for the best alternative. The templates are found by querying the
9027 automaton state for each insn in the chosen bundle states.
9029 So the algorithm makes two (forward and backward) passes through
9030 the EBB. */
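/* Editorial sketch of those two passes as they are implemented below
   (the detailed bookkeeping lives in issue_nops_and_insn and
   insert_bundle_state):

     forward pass:
       for each important insn I in the EBB, in order:
         for each bundle state S recorded for the previous insn:
           try issue_nops_and_insn (S, 2, I, ...)   -- only for F/B/L/S insns
           try issue_nops_and_insn (S, 1, I, ...)
           try issue_nops_and_insn (S, 0, I, ...)

     backward pass:
       pick the best state whose last bundle is completely filled, then
       walk its originator chain, emitting the recorded before/after
       nops and a bundle selector at every slot position that is a
       multiple of 3.  */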
9032 static void
9033 bundling (FILE *dump, int verbose, rtx_insn *prev_head_insn, rtx_insn *tail)
9035 struct bundle_state *curr_state, *next_state, *best_state;
9036 rtx_insn *insn, *next_insn;
9037 int insn_num;
9038 int i, bundle_end_p, only_bundle_end_p, asm_p;
9039 int pos = 0, max_pos, template0, template1;
9040 rtx_insn *b;
9041 enum attr_type type;
9043 insn_num = 0;
9044 /* Count insns in the EBB. */
9045 for (insn = NEXT_INSN (prev_head_insn);
9046 insn && insn != tail;
9047 insn = NEXT_INSN (insn))
9048 if (INSN_P (insn))
9049 insn_num++;
9050 if (insn_num == 0)
9051 return;
9052 bundling_p = 1;
9053 dfa_clean_insn_cache ();
9054 initiate_bundle_state_table ();
9055 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
9056 /* First (forward) pass -- generation of bundle states. */
9057 curr_state = get_free_bundle_state ();
9058 curr_state->insn = NULL;
9059 curr_state->before_nops_num = 0;
9060 curr_state->after_nops_num = 0;
9061 curr_state->insn_num = 0;
9062 curr_state->cost = 0;
9063 curr_state->accumulated_insns_num = 0;
9064 curr_state->branch_deviation = 0;
9065 curr_state->middle_bundle_stops = 0;
9066 curr_state->next = NULL;
9067 curr_state->originator = NULL;
9068 state_reset (curr_state->dfa_state);
9069 index_to_bundle_states [0] = curr_state;
9070 insn_num = 0;
9071 /* Shift cycle mark if it is put on insn which could be ignored. */
9072 for (insn = NEXT_INSN (prev_head_insn);
9073 insn != tail;
9074 insn = NEXT_INSN (insn))
9075 if (INSN_P (insn)
9076 && !important_for_bundling_p (insn)
9077 && GET_MODE (insn) == TImode)
9079 PUT_MODE (insn, VOIDmode);
9080 for (next_insn = NEXT_INSN (insn);
9081 next_insn != tail;
9082 next_insn = NEXT_INSN (next_insn))
9083 if (important_for_bundling_p (next_insn)
9084 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
9086 PUT_MODE (next_insn, TImode);
9087 break;
9090 /* Forward pass: generation of bundle states. */
9091 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
9092 insn != NULL_RTX;
9093 insn = next_insn)
9095 gcc_assert (important_for_bundling_p (insn));
9096 type = ia64_safe_type (insn);
9097 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
9098 insn_num++;
9099 index_to_bundle_states [insn_num] = NULL;
9100 for (curr_state = index_to_bundle_states [insn_num - 1];
9101 curr_state != NULL;
9102 curr_state = next_state)
9104 pos = curr_state->accumulated_insns_num % 3;
9105 next_state = curr_state->next;
9106 /* We must fill up the current bundle in order to start a
9107 subsequent asm insn in a new bundle. An asm insn is always
9108 placed in a separate bundle. */
9109 only_bundle_end_p
9110 = (next_insn != NULL_RTX
9111 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
9112 && unknown_for_bundling_p (next_insn));
9113 /* We may fill up the current bundle if it is the cycle end
9114 without a group barrier. */
9115 bundle_end_p
9116 = (only_bundle_end_p || next_insn == NULL_RTX
9117 || (GET_MODE (next_insn) == TImode
9118 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
9119 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
9120 || type == TYPE_S)
9121 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
9122 only_bundle_end_p);
9123 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
9124 only_bundle_end_p);
9125 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
9126 only_bundle_end_p);
9128 gcc_assert (index_to_bundle_states [insn_num]);
9129 for (curr_state = index_to_bundle_states [insn_num];
9130 curr_state != NULL;
9131 curr_state = curr_state->next)
9132 if (verbose >= 2 && dump)
9134 /* This structure is taken from generated code of the
9135 pipeline hazard recognizer (see file insn-attrtab.c).
9136 Please don't forget to change the structure if a new
9137 automaton is added to .md file. */
9138 struct DFA_chip
9140 unsigned short one_automaton_state;
9141 unsigned short oneb_automaton_state;
9142 unsigned short two_automaton_state;
9143 unsigned short twob_automaton_state;
9146 fprintf
9147 (dump,
9148 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
9149 curr_state->unique_num,
9150 (curr_state->originator == NULL
9151 ? -1 : curr_state->originator->unique_num),
9152 curr_state->cost,
9153 curr_state->before_nops_num, curr_state->after_nops_num,
9154 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9155 curr_state->middle_bundle_stops,
9156 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9157 INSN_UID (insn));
9161 /* We should find a solution because the second insn scheduling pass
9162 has found one. */
9163 gcc_assert (index_to_bundle_states [insn_num]);
9164 /* Find a state corresponding to the best insn sequence. */
9165 best_state = NULL;
9166 for (curr_state = index_to_bundle_states [insn_num];
9167 curr_state != NULL;
9168 curr_state = curr_state->next)
9169 /* We only look at states whose last bundle is completely filled.
9170 Among those we prefer insn sequences with minimal cost first,
9171 then with the fewest inserted nops, and finally with branch insns
9172 placed in 3rd slots. */
9173 if (curr_state->accumulated_insns_num % 3 == 0
9174 && (best_state == NULL || best_state->cost > curr_state->cost
9175 || (best_state->cost == curr_state->cost
9176 && (curr_state->accumulated_insns_num
9177 < best_state->accumulated_insns_num
9178 || (curr_state->accumulated_insns_num
9179 == best_state->accumulated_insns_num
9180 && (curr_state->branch_deviation
9181 < best_state->branch_deviation
9182 || (curr_state->branch_deviation
9183 == best_state->branch_deviation
9184 && curr_state->middle_bundle_stops
9185 < best_state->middle_bundle_stops)))))))
9186 best_state = curr_state;
9187 /* Second (backward) pass: adding nops and templates. */
9188 gcc_assert (best_state);
9189 insn_num = best_state->before_nops_num;
9190 template0 = template1 = -1;
9191 for (curr_state = best_state;
9192 curr_state->originator != NULL;
9193 curr_state = curr_state->originator)
9195 insn = curr_state->insn;
9196 asm_p = unknown_for_bundling_p (insn);
9197 insn_num++;
9198 if (verbose >= 2 && dump)
9200 struct DFA_chip
9202 unsigned short one_automaton_state;
9203 unsigned short oneb_automaton_state;
9204 unsigned short two_automaton_state;
9205 unsigned short twob_automaton_state;
9208 fprintf
9209 (dump,
9210 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
9211 curr_state->unique_num,
9212 (curr_state->originator == NULL
9213 ? -1 : curr_state->originator->unique_num),
9214 curr_state->cost,
9215 curr_state->before_nops_num, curr_state->after_nops_num,
9216 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9217 curr_state->middle_bundle_stops,
9218 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9219 INSN_UID (insn));
9221 /* Find the position in the current bundle window. The window can
9222 contain at most two bundles. A two-bundle window means that
9223 the processor will make two bundle rotations. */
9224 max_pos = get_max_pos (curr_state->dfa_state);
9225 if (max_pos == 6
9226 /* The following (negative template number) means that the
9227 processor did one bundle rotation. */
9228 || (max_pos == 3 && template0 < 0))
9230 /* We are at the end of the window -- find template(s) for
9231 its bundle(s). */
9232 pos = max_pos;
9233 if (max_pos == 3)
9234 template0 = get_template (curr_state->dfa_state, 3);
9235 else
9237 template1 = get_template (curr_state->dfa_state, 3);
9238 template0 = get_template (curr_state->dfa_state, 6);
9241 if (max_pos > 3 && template1 < 0)
9242 /* It may happen when we have the stop inside a bundle. */
9244 gcc_assert (pos <= 3);
9245 template1 = get_template (curr_state->dfa_state, 3);
9246 pos += 3;
9248 if (!asm_p)
9249 /* Emit nops after the current insn. */
9250 for (i = 0; i < curr_state->after_nops_num; i++)
9252 rtx nop_pat = gen_nop ();
9253 rtx_insn *nop = emit_insn_after (nop_pat, insn);
9254 pos--;
9255 gcc_assert (pos >= 0);
9256 if (pos % 3 == 0)
9258 /* We are at the start of a bundle: emit the template
9259 (it should be defined). */
9260 gcc_assert (template0 >= 0);
9261 ia64_add_bundle_selector_before (template0, nop);
9262 /* If we have a two-bundle window, we make one bundle
9263 rotation. Otherwise template0 will be undefined
9264 (a negative value). */
9265 template0 = template1;
9266 template1 = -1;
9269 /* Move the position backward in the window. A group barrier has
9270 no slot. An asm insn takes a whole bundle. */
9271 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9272 && !unknown_for_bundling_p (insn))
9273 pos--;
9274 /* Long insn takes 2 slots. */
9275 if (ia64_safe_type (insn) == TYPE_L)
9276 pos--;
9277 gcc_assert (pos >= 0);
9278 if (pos % 3 == 0
9279 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9280 && !unknown_for_bundling_p (insn))
9282 /* The current insn is at the bundle start: emit the
9283 template. */
9284 gcc_assert (template0 >= 0);
9285 ia64_add_bundle_selector_before (template0, insn);
9286 b = PREV_INSN (insn);
9287 insn = b;
9288 /* See comment above in analogous place for emitting nops
9289 after the insn. */
9290 template0 = template1;
9291 template1 = -1;
9293 /* Emit nops before the current insn. */
9294 for (i = 0; i < curr_state->before_nops_num; i++)
9296 rtx nop_pat = gen_nop ();
9297 ia64_emit_insn_before (nop_pat, insn);
9298 rtx_insn *nop = PREV_INSN (insn);
9299 insn = nop;
9300 pos--;
9301 gcc_assert (pos >= 0);
9302 if (pos % 3 == 0)
9304 /* See comment above in analogous place for emitting nops
9305 after the insn. */
9306 gcc_assert (template0 >= 0);
9307 ia64_add_bundle_selector_before (template0, insn);
9308 b = PREV_INSN (insn);
9309 insn = b;
9310 template0 = template1;
9311 template1 = -1;
9316 if (flag_checking)
9318 /* Assert right calculation of middle_bundle_stops. */
9319 int num = best_state->middle_bundle_stops;
9320 bool start_bundle = true, end_bundle = false;
9322 for (insn = NEXT_INSN (prev_head_insn);
9323 insn && insn != tail;
9324 insn = NEXT_INSN (insn))
9326 if (!INSN_P (insn))
9327 continue;
9328 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9329 start_bundle = true;
9330 else
9332 rtx_insn *next_insn;
9334 for (next_insn = NEXT_INSN (insn);
9335 next_insn && next_insn != tail;
9336 next_insn = NEXT_INSN (next_insn))
9337 if (INSN_P (next_insn)
9338 && (ia64_safe_itanium_class (next_insn)
9339 != ITANIUM_CLASS_IGNORE
9340 || recog_memoized (next_insn)
9341 == CODE_FOR_bundle_selector)
9342 && GET_CODE (PATTERN (next_insn)) != USE
9343 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9344 break;
9346 end_bundle = next_insn == NULL_RTX
9347 || next_insn == tail
9348 || (INSN_P (next_insn)
9349 && recog_memoized (next_insn) == CODE_FOR_bundle_selector);
9350 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9351 && !start_bundle && !end_bundle
9352 && next_insn
9353 && !unknown_for_bundling_p (next_insn))
9354 num--;
9356 start_bundle = false;
9360 gcc_assert (num == 0);
9363 free (index_to_bundle_states);
9364 finish_bundle_state_table ();
9365 bundling_p = 0;
9366 dfa_clean_insn_cache ();
9369 /* The following function is called at the end of scheduling BB or
9370 EBB. After reload, it inserts stop bits and does insn bundling. */
9372 static void
9373 ia64_sched_finish (FILE *dump, int sched_verbose)
9375 if (sched_verbose)
9376 fprintf (dump, "// Finishing schedule.\n");
9377 if (!reload_completed)
9378 return;
9379 if (reload_completed)
9381 final_emit_insn_group_barriers (dump);
9382 bundling (dump, sched_verbose, current_sched_info->prev_head,
9383 current_sched_info->next_tail);
9384 if (sched_verbose && dump)
9385 fprintf (dump, "// finishing %d-%d\n",
9386 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9387 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9389 return;
9393 /* The following function inserts stop bits in scheduled BB or EBB. */
9395 static void
9396 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9398 rtx_insn *insn;
9399 int need_barrier_p = 0;
9400 int seen_good_insn = 0;
9402 init_insn_group_barriers ();
9404 for (insn = NEXT_INSN (current_sched_info->prev_head);
9405 insn != current_sched_info->next_tail;
9406 insn = NEXT_INSN (insn))
9408 if (BARRIER_P (insn))
9410 rtx_insn *last = prev_active_insn (insn);
9412 if (! last)
9413 continue;
9414 if (JUMP_TABLE_DATA_P (last))
9415 last = prev_active_insn (last);
9416 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9417 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9419 init_insn_group_barriers ();
9420 seen_good_insn = 0;
9421 need_barrier_p = 0;
9423 else if (NONDEBUG_INSN_P (insn))
9425 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9427 init_insn_group_barriers ();
9428 seen_good_insn = 0;
9429 need_barrier_p = 0;
9431 else if (need_barrier_p || group_barrier_needed (insn)
9432 || (mflag_sched_stop_bits_after_every_cycle
9433 && GET_MODE (insn) == TImode
9434 && seen_good_insn))
9436 if (TARGET_EARLY_STOP_BITS)
9438 rtx_insn *last;
9440 for (last = insn;
9441 last != current_sched_info->prev_head;
9442 last = PREV_INSN (last))
9443 if (INSN_P (last) && GET_MODE (last) == TImode
9444 && stops_p [INSN_UID (last)])
9445 break;
9446 if (last == current_sched_info->prev_head)
9447 last = insn;
9448 last = prev_active_insn (last);
9449 if (last
9450 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9451 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9452 last);
9453 init_insn_group_barriers ();
9454 for (last = NEXT_INSN (last);
9455 last != insn;
9456 last = NEXT_INSN (last))
9457 if (INSN_P (last))
9459 group_barrier_needed (last);
9460 if (recog_memoized (last) >= 0
9461 && important_for_bundling_p (last))
9462 seen_good_insn = 1;
9465 else
9467 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9468 insn);
9469 init_insn_group_barriers ();
9470 seen_good_insn = 0;
9472 group_barrier_needed (insn);
9473 if (recog_memoized (insn) >= 0
9474 && important_for_bundling_p (insn))
9475 seen_good_insn = 1;
9477 else if (recog_memoized (insn) >= 0
9478 && important_for_bundling_p (insn))
9479 seen_good_insn = 1;
9480 need_barrier_p = (CALL_P (insn) || unknown_for_bundling_p (insn));
9487 /* The following function returns the lookahead depth used by the
9488 multipass DFA insn scheduler. */
9490 static int
9491 ia64_first_cycle_multipass_dfa_lookahead (void)
9493 return (reload_completed ? 6 : 4);
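/* Editorial note: the deeper lookahead after reload (6 vs. 4)
   presumably corresponds to the six-slot, two-bundle issue window
   modeled by the bundling machinery above; before reload a shallower
   multipass search is enough.  */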
9496 /* The following function initiates variable `dfa_pre_cycle_insn'. */
9498 static void
9499 ia64_init_dfa_pre_cycle_insn (void)
9501 if (temp_dfa_state == NULL)
9503 dfa_state_size = state_size ();
9504 temp_dfa_state = xmalloc (dfa_state_size);
9505 prev_cycle_state = xmalloc (dfa_state_size);
9507 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9508 SET_PREV_INSN (dfa_pre_cycle_insn) = SET_NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9509 recog_memoized (dfa_pre_cycle_insn);
9510 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9511 SET_PREV_INSN (dfa_stop_insn) = SET_NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9512 recog_memoized (dfa_stop_insn);
9515 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9516 used by the DFA insn scheduler. */
9518 static rtx
9519 ia64_dfa_pre_cycle_insn (void)
9521 return dfa_pre_cycle_insn;
9524 /* The following function returns TRUE if PRODUCER (of type ilog or
9525 ld) produces address for CONSUMER (of type st or stf). */
9528 ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9530 rtx dest, reg, mem;
9532 gcc_assert (producer && consumer);
9533 dest = ia64_single_set (producer);
9534 gcc_assert (dest);
9535 reg = SET_DEST (dest);
9536 gcc_assert (reg);
9537 if (GET_CODE (reg) == SUBREG)
9538 reg = SUBREG_REG (reg);
9539 gcc_assert (GET_CODE (reg) == REG);
9541 dest = ia64_single_set (consumer);
9542 gcc_assert (dest);
9543 mem = SET_DEST (dest);
9544 gcc_assert (mem && GET_CODE (mem) == MEM);
9545 return reg_mentioned_p (reg, mem);
9548 /* The following function returns TRUE if PRODUCER (of type ilog or
9549 ld) produces address for CONSUMER (of type ld or fld). */
9552 ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9554 rtx dest, src, reg, mem;
9556 gcc_assert (producer && consumer);
9557 dest = ia64_single_set (producer);
9558 gcc_assert (dest);
9559 reg = SET_DEST (dest);
9560 gcc_assert (reg);
9561 if (GET_CODE (reg) == SUBREG)
9562 reg = SUBREG_REG (reg);
9563 gcc_assert (GET_CODE (reg) == REG);
9565 src = ia64_single_set (consumer);
9566 gcc_assert (src);
9567 mem = SET_SRC (src);
9568 gcc_assert (mem);
9570 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9571 mem = XVECEXP (mem, 0, 0);
9572 else if (GET_CODE (mem) == IF_THEN_ELSE)
9573 /* ??? Is this bypass necessary for ld.c? */
9575 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9576 mem = XEXP (mem, 1);
9579 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9580 mem = XEXP (mem, 0);
9582 if (GET_CODE (mem) == UNSPEC)
9584 int c = XINT (mem, 1);
9586 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9587 || c == UNSPEC_LDSA);
9588 mem = XVECEXP (mem, 0, 0);
9591 /* Note that LO_SUM is used for GOT loads. */
9592 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9594 return reg_mentioned_p (reg, mem);
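/* Editorial example of the bypass above, in IA-64 assembly: the first
   load produces the register used as the address of the second, so the
   producer/consumer pair qualifies:

       ld8 r14 = [r15]     // producer: loads the address into r14
       ld8 r17 = [r14]     // consumer: uses r14 as its address

   The reg_mentioned_p test also accepts the speculative ld.a / ld.s /
   ld.sa forms, whose memory operand is wrapped in the UNSPECs unwrapped
   above.  */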
9597 /* The following function returns TRUE if INSN produces an address for a
9598 load/store insn. We will place such insns into an M slot because that
9599 decreases their latency. */
9602 ia64_produce_address_p (rtx insn)
9604 return insn->call;
9608 /* Emit pseudo-ops for the assembler to describe predicate relations.
9609 At present this assumes that we only consider predicate pairs to
9610 be mutex, and that the assembler can deduce proper values from
9611 straight-line code. */
9613 static void
9614 emit_predicate_relation_info (void)
9616 basic_block bb;
9618 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9620 int r;
9621 rtx_insn *head = BB_HEAD (bb);
9623 /* We only need such notes at code labels. */
9624 if (! LABEL_P (head))
9625 continue;
9626 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9627 head = NEXT_INSN (head);
9629 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9630 grabbing the entire block of predicate registers. */
9631 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9632 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9634 rtx p = gen_rtx_REG (BImode, r);
9635 rtx_insn *n = emit_insn_after (gen_pred_rel_mutex (p), head);
9636 if (head == BB_END (bb))
9637 BB_END (bb) = n;
9638 head = n;
9642 /* Look for conditional calls that do not return, and protect predicate
9643 relations around them. Otherwise the assembler will assume the call
9644 returns, and complain about uses of call-clobbered predicates after
9645 the call. */
9646 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9648 rtx_insn *insn = BB_HEAD (bb);
9650 while (1)
9652 if (CALL_P (insn)
9653 && GET_CODE (PATTERN (insn)) == COND_EXEC
9654 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9656 rtx_insn *b =
9657 emit_insn_before (gen_safe_across_calls_all (), insn);
9658 rtx_insn *a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9659 if (BB_HEAD (bb) == insn)
9660 BB_HEAD (bb) = b;
9661 if (BB_END (bb) == insn)
9662 BB_END (bb) = a;
9665 if (insn == BB_END (bb))
9666 break;
9667 insn = NEXT_INSN (insn);
9672 /* Perform machine dependent operations on the rtl chain INSNS. */
9674 static void
9675 ia64_reorg (void)
9677 /* We are freeing block_for_insn in the toplev to keep compatibility
9678 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9679 compute_bb_for_insn ();
9681 /* If optimizing, we'll have split before scheduling. */
9682 if (optimize == 0)
9683 split_all_insns ();
9685 if (optimize && flag_schedule_insns_after_reload
9686 && dbg_cnt (ia64_sched2))
9688 basic_block bb;
9689 timevar_push (TV_SCHED2);
9690 ia64_final_schedule = 1;
9692 /* We can't let modulo-sched prevent us from scheduling any bbs,
9693 since we need the final schedule to produce bundle information. */
9694 FOR_EACH_BB_FN (bb, cfun)
9695 bb->flags &= ~BB_DISABLE_SCHEDULE;
9697 initiate_bundle_states ();
9698 ia64_nop = make_insn_raw (gen_nop ());
9699 SET_PREV_INSN (ia64_nop) = SET_NEXT_INSN (ia64_nop) = NULL_RTX;
9700 recog_memoized (ia64_nop);
9701 clocks_length = get_max_uid () + 1;
9702 stops_p = XCNEWVEC (char, clocks_length);
9704 if (ia64_tune == PROCESSOR_ITANIUM2)
9706 pos_1 = get_cpu_unit_code ("2_1");
9707 pos_2 = get_cpu_unit_code ("2_2");
9708 pos_3 = get_cpu_unit_code ("2_3");
9709 pos_4 = get_cpu_unit_code ("2_4");
9710 pos_5 = get_cpu_unit_code ("2_5");
9711 pos_6 = get_cpu_unit_code ("2_6");
9712 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9713 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9714 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9715 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9716 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9717 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9718 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9719 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9720 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9721 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9722 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9723 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9724 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9725 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9726 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9727 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9728 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9729 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9730 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9731 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9733 else
9735 pos_1 = get_cpu_unit_code ("1_1");
9736 pos_2 = get_cpu_unit_code ("1_2");
9737 pos_3 = get_cpu_unit_code ("1_3");
9738 pos_4 = get_cpu_unit_code ("1_4");
9739 pos_5 = get_cpu_unit_code ("1_5");
9740 pos_6 = get_cpu_unit_code ("1_6");
9741 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9742 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9743 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9744 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9745 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9746 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9747 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9748 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9749 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9750 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9751 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9752 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9753 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9754 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9755 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9756 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9757 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9758 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9759 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9760 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9763 if (flag_selective_scheduling2
9764 && !maybe_skip_selective_scheduling ())
9765 run_selective_scheduling ();
9766 else
9767 schedule_ebbs ();
9769 /* Redo alignment computation, as it might have gone wrong. */
9770 compute_alignments ();
9772 /* We cannot reuse this one because it has been corrupted by the
9773 evil glat. */
9774 finish_bundle_states ();
9775 free (stops_p);
9776 stops_p = NULL;
9777 emit_insn_group_barriers (dump_file);
9779 ia64_final_schedule = 0;
9780 timevar_pop (TV_SCHED2);
9782 else
9783 emit_all_insn_group_barriers (dump_file);
9785 df_analyze ();
9787 /* A call must not be the last instruction in a function: the return
9788 address must still lie within the function so that unwinding works
9789 properly. Note that IA-64 differs from dwarf2 on this point. */
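/* Hedged sketch of the fix-up emitted below when the function ends in a
   call: an insn group barrier (stop bit) if one is not already present,
   then a break.f, then another barrier, so that the return address can
   never point past the end of the function; the exact assembly comes from
   the insn_group_barrier and break_f patterns in ia64.md.  */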
9790 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9792 rtx_insn *insn;
9793 int saw_stop = 0;
9795 insn = get_last_insn ();
9796 if (! INSN_P (insn))
9797 insn = prev_active_insn (insn);
9798 if (insn)
9800 /* Skip over insns that expand to nothing. */
9801 while (NONJUMP_INSN_P (insn)
9802 && get_attr_empty (insn) == EMPTY_YES)
9804 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9805 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9806 saw_stop = 1;
9807 insn = prev_active_insn (insn);
9809 if (CALL_P (insn))
9811 if (! saw_stop)
9812 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9813 emit_insn (gen_break_f ());
9814 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9819 emit_predicate_relation_info ();
9821 if (flag_var_tracking)
9823 timevar_push (TV_VAR_TRACKING);
9824 variable_tracking_main ();
9825 timevar_pop (TV_VAR_TRACKING);
9827 df_finish_pass (false);
9830 /* Return true if REGNO is used by the epilogue. */
9833 ia64_epilogue_uses (int regno)
9835 switch (regno)
9837 case R_GR (1):
9838 /* With a call to a function in another module, we will write a new
9839 value to "gp". After returning from such a call, we need to make
9840 sure the function restores the original gp-value, even if the
9841 function itself does not use the gp anymore. */
9842 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9844 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9845 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9846 /* For functions defined with the syscall_linkage attribute, all
9847 input registers are marked as live at all function exits. This
9848 prevents the register allocator from using the input registers,
9849 which in turn makes it possible to restart a system call after
9850 an interrupt without having to save/restore the input registers.
9851 This also prevents kernel data from leaking to application code. */
9852 return lookup_attribute ("syscall_linkage",
9853 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9855 case R_BR (0):
9856 /* Conditional return patterns can't represent the use of `b0' as
9857 the return address, so we force the value live this way. */
9858 return 1;
9860 case AR_PFS_REGNUM:
9861 /* Likewise for ar.pfs, which is used by br.ret. */
9862 return 1;
9864 default:
9865 return 0;
9869 /* Return true if REGNO is used by the frame unwinder. */
9872 ia64_eh_uses (int regno)
9874 unsigned int r;
9876 if (! reload_completed)
9877 return 0;
9879 if (regno == 0)
9880 return 0;
9882 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9883 if (regno == current_frame_info.r[r]
9884 || regno == emitted_frame_related_regs[r])
9885 return 1;
9887 return 0;
9890 /* Return true if this goes in small data/bss. */
9892 /* ??? We could also support our own long data here, generating movl/add/ld8
9893 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9894 code faster because there is one less load. This also includes incomplete
9895 types which can't go in sdata/sbss. */
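/* Illustrative sketch (assumed code sequences, not from this file): an
   object placed in sdata/sbss can be addressed gp-relative in one short
   sequence, e.g.
       addl r14 = @gprel(var), gp ;;
       ld8 r15 = [r14]
   whereas data outside the small data area normally goes through the
   linkage table, the "addl,ld8/ld8" sequence mentioned above.  */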
9897 static bool
9898 ia64_in_small_data_p (const_tree exp)
9900 if (TARGET_NO_SDATA)
9901 return false;
9903 /* We want to merge strings, so we never consider them small data. */
9904 if (TREE_CODE (exp) == STRING_CST)
9905 return false;
9907 /* Functions are never small data. */
9908 if (TREE_CODE (exp) == FUNCTION_DECL)
9909 return false;
9911 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9913 const char *section = DECL_SECTION_NAME (exp);
9915 if (strcmp (section, ".sdata") == 0
9916 || strncmp (section, ".sdata.", 7) == 0
9917 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9918 || strcmp (section, ".sbss") == 0
9919 || strncmp (section, ".sbss.", 6) == 0
9920 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9921 return true;
9923 else
9925 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9927 /* If this is an incomplete type with size 0, then we can't put it
9928 in sdata because it might be too big when completed. */
9929 if (size > 0 && size <= ia64_section_threshold)
9930 return true;
9933 return false;
9936 /* Output assembly directives for prologue regions. */
9938 /* True if the basic block currently being processed is the last one in the function. */
9940 static bool last_block;
9942 /* True if we need a copy_state command at the start of the next block. */
9944 static bool need_copy_state;
9946 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9947 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9948 #endif
9950 /* The function emits unwind directives for the start of an epilogue. */
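/* If the epilogue is not in the last block, the unwind state is first
   labelled here with ".label_state N" and then re-established with ".body"
   and ".copy_state N" at the start of the next block; see the
   need_copy_state handling in ia64_asm_unwind_emit below.  */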
9952 static void
9953 process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
9954 bool unwind, bool frame ATTRIBUTE_UNUSED)
9956 /* If this isn't the last block of the function, then we need to label the
9957 current state, and copy it back in at the start of the next block. */
9959 if (!last_block)
9961 if (unwind)
9962 fprintf (asm_out_file, "\t.label_state %d\n",
9963 ++cfun->machine->state_num);
9964 need_copy_state = true;
9967 if (unwind)
9968 fprintf (asm_out_file, "\t.restore sp\n");
9971 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
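/* For example, a prologue adjustment "sp = sp - 32" is reported as
   ".fframe 32", while "sp = sp + N" or copying the hard frame pointer back
   into sp marks an epilogue and is handled via process_epilogue, which
   emits ".restore sp".  */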
9973 static void
9974 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9975 bool unwind, bool frame)
9977 rtx dest = SET_DEST (pat);
9978 rtx src = SET_SRC (pat);
9980 if (dest == stack_pointer_rtx)
9982 if (GET_CODE (src) == PLUS)
9984 rtx op0 = XEXP (src, 0);
9985 rtx op1 = XEXP (src, 1);
9987 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9989 if (INTVAL (op1) < 0)
9991 gcc_assert (!frame_pointer_needed);
9992 if (unwind)
9993 fprintf (asm_out_file,
9994 "\t.fframe " HOST_WIDE_INT_PRINT_DEC"\n",
9995 -INTVAL (op1));
9997 else
9998 process_epilogue (asm_out_file, insn, unwind, frame);
10000 else
10002 gcc_assert (src == hard_frame_pointer_rtx);
10003 process_epilogue (asm_out_file, insn, unwind, frame);
10006 else if (dest == hard_frame_pointer_rtx)
10008 gcc_assert (src == stack_pointer_rtx);
10009 gcc_assert (frame_pointer_needed);
10011 if (unwind)
10012 fprintf (asm_out_file, "\t.vframe r%d\n",
10013 ia64_dbx_register_number (REGNO (dest)));
10015 else
10016 gcc_unreachable ();
10019 /* This function processes a SET pattern for REG_CFA_REGISTER. */
10021 static void
10022 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
10024 rtx dest = SET_DEST (pat);
10025 rtx src = SET_SRC (pat);
10026 int dest_regno = REGNO (dest);
10027 int src_regno;
10029 if (src == pc_rtx)
10031 /* Saving return address pointer. */
10032 if (unwind)
10033 fprintf (asm_out_file, "\t.save rp, r%d\n",
10034 ia64_dbx_register_number (dest_regno));
10035 return;
10038 src_regno = REGNO (src);
10040 switch (src_regno)
10042 case PR_REG (0):
10043 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
10044 if (unwind)
10045 fprintf (asm_out_file, "\t.save pr, r%d\n",
10046 ia64_dbx_register_number (dest_regno));
10047 break;
10049 case AR_UNAT_REGNUM:
10050 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
10051 if (unwind)
10052 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
10053 ia64_dbx_register_number (dest_regno));
10054 break;
10056 case AR_LC_REGNUM:
10057 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
10058 if (unwind)
10059 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
10060 ia64_dbx_register_number (dest_regno));
10061 break;
10063 default:
10064 /* Everything else should indicate being stored to memory. */
10065 gcc_unreachable ();
10069 /* This function processes a SET pattern for REG_CFA_OFFSET. */
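/* For example, saving b0 at [sp + 16] is reported as ".savesp rp, 16";
   saves addressed relative to the hard frame pointer use ".savepsp" with
   the offset negated.  */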
10071 static void
10072 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
10074 rtx dest = SET_DEST (pat);
10075 rtx src = SET_SRC (pat);
10076 int src_regno = REGNO (src);
10077 const char *saveop;
10078 HOST_WIDE_INT off;
10079 rtx base;
10081 gcc_assert (MEM_P (dest));
10082 if (GET_CODE (XEXP (dest, 0)) == REG)
10084 base = XEXP (dest, 0);
10085 off = 0;
10087 else
10089 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
10090 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
10091 base = XEXP (XEXP (dest, 0), 0);
10092 off = INTVAL (XEXP (XEXP (dest, 0), 1));
10095 if (base == hard_frame_pointer_rtx)
10097 saveop = ".savepsp";
10098 off = - off;
10100 else
10102 gcc_assert (base == stack_pointer_rtx);
10103 saveop = ".savesp";
10106 src_regno = REGNO (src);
10107 switch (src_regno)
10109 case BR_REG (0):
10110 gcc_assert (!current_frame_info.r[reg_save_b0]);
10111 if (unwind)
10112 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
10113 saveop, off);
10114 break;
10116 case PR_REG (0):
10117 gcc_assert (!current_frame_info.r[reg_save_pr]);
10118 if (unwind)
10119 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
10120 saveop, off);
10121 break;
10123 case AR_LC_REGNUM:
10124 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
10125 if (unwind)
10126 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
10127 saveop, off);
10128 break;
10130 case AR_PFS_REGNUM:
10131 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
10132 if (unwind)
10133 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
10134 saveop, off);
10135 break;
10137 case AR_UNAT_REGNUM:
10138 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
10139 if (unwind)
10140 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
10141 saveop, off);
10142 break;
10144 case GR_REG (4):
10145 case GR_REG (5):
10146 case GR_REG (6):
10147 case GR_REG (7):
10148 if (unwind)
10149 fprintf (asm_out_file, "\t.save.g 0x%x\n",
10150 1 << (src_regno - GR_REG (4)));
10151 break;
10153 case BR_REG (1):
10154 case BR_REG (2):
10155 case BR_REG (3):
10156 case BR_REG (4):
10157 case BR_REG (5):
10158 if (unwind)
10159 fprintf (asm_out_file, "\t.save.b 0x%x\n",
10160 1 << (src_regno - BR_REG (1)));
10161 break;
10163 case FR_REG (2):
10164 case FR_REG (3):
10165 case FR_REG (4):
10166 case FR_REG (5):
10167 if (unwind)
10168 fprintf (asm_out_file, "\t.save.f 0x%x\n",
10169 1 << (src_regno - FR_REG (2)));
10170 break;
10172 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
10173 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
10174 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
10175 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
10176 if (unwind)
10177 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
10178 1 << (src_regno - FR_REG (12)));
10179 break;
10181 default:
10182 /* ??? For some reason we mark other general registers, even those
10183 we can't represent in the unwind info. Ignore them. */
10184 break;
10188 /* This function looks at a single insn and emits any directives
10189 required to unwind this insn. */
10191 static void
10192 ia64_asm_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
10194 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10195 bool frame = dwarf2out_do_frame ();
10196 rtx note, pat;
10197 bool handled_one;
10199 if (!unwind && !frame)
10200 return;
10202 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10204 last_block = NOTE_BASIC_BLOCK (insn)->next_bb
10205 == EXIT_BLOCK_PTR_FOR_FN (cfun);
10207 /* Restore unwind state from immediately before the epilogue. */
10208 if (need_copy_state)
10210 if (unwind)
10212 fprintf (asm_out_file, "\t.body\n");
10213 fprintf (asm_out_file, "\t.copy_state %d\n",
10214 cfun->machine->state_num);
10216 need_copy_state = false;
10220 if (NOTE_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10221 return;
10223 /* Look for the ALLOC insn. */
10224 if (INSN_CODE (insn) == CODE_FOR_alloc)
10226 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10227 int dest_regno = REGNO (dest);
10229 /* If this is the final destination for ar.pfs, then this must
10230 be the alloc in the prologue. */
10231 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10233 if (unwind)
10234 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10235 ia64_dbx_register_number (dest_regno));
10237 else
10239 /* This must be an alloc before a sibcall. We must drop the
10240 old frame info. The easiest way to drop the old frame
10241 info is to ensure we had a ".restore sp" directive
10242 followed by a new prologue. If the procedure doesn't
10243 have a memory-stack frame, we'll issue a dummy ".restore
10244 sp" now. */
10245 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10246 /* If we haven't done process_epilogue () yet, do it now. */
10247 process_epilogue (asm_out_file, insn, unwind, frame);
10248 if (unwind)
10249 fprintf (asm_out_file, "\t.prologue\n");
10251 return;
10254 handled_one = false;
10255 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10256 switch (REG_NOTE_KIND (note))
10258 case REG_CFA_ADJUST_CFA:
10259 pat = XEXP (note, 0);
10260 if (pat == NULL)
10261 pat = PATTERN (insn);
10262 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10263 handled_one = true;
10264 break;
10266 case REG_CFA_OFFSET:
10267 pat = XEXP (note, 0);
10268 if (pat == NULL)
10269 pat = PATTERN (insn);
10270 process_cfa_offset (asm_out_file, pat, unwind);
10271 handled_one = true;
10272 break;
10274 case REG_CFA_REGISTER:
10275 pat = XEXP (note, 0);
10276 if (pat == NULL)
10277 pat = PATTERN (insn);
10278 process_cfa_register (asm_out_file, pat, unwind);
10279 handled_one = true;
10280 break;
10282 case REG_FRAME_RELATED_EXPR:
10283 case REG_CFA_DEF_CFA:
10284 case REG_CFA_EXPRESSION:
10285 case REG_CFA_RESTORE:
10286 case REG_CFA_SET_VDRAP:
10287 /* Not used in the ia64 port. */
10288 gcc_unreachable ();
10290 default:
10291 /* Not a frame-related note. */
10292 break;
10295 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10296 explicit action to take. No guessing required. */
10297 gcc_assert (handled_one);
10300 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10302 static void
10303 ia64_asm_emit_except_personality (rtx personality)
10305 fputs ("\t.personality\t", asm_out_file);
10306 output_addr_const (asm_out_file, personality);
10307 fputc ('\n', asm_out_file);
10310 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10312 static void
10313 ia64_asm_init_sections (void)
10315 exception_section = get_unnamed_section (0, output_section_asm_op,
10316 "\t.handlerdata");
10319 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10321 static enum unwind_info_type
10322 ia64_debug_unwind_info (void)
10324 return UI_TARGET;
10327 enum ia64_builtins
10329 IA64_BUILTIN_BSP,
10330 IA64_BUILTIN_COPYSIGNQ,
10331 IA64_BUILTIN_FABSQ,
10332 IA64_BUILTIN_FLUSHRS,
10333 IA64_BUILTIN_INFQ,
10334 IA64_BUILTIN_HUGE_VALQ,
10335 IA64_BUILTIN_NANQ,
10336 IA64_BUILTIN_NANSQ,
10337 IA64_BUILTIN_max
10340 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10342 void
10343 ia64_init_builtins (void)
10345 tree fpreg_type;
10346 tree float80_type;
10347 tree decl;
10349 /* The __fpreg type. */
10350 fpreg_type = make_node (REAL_TYPE);
10351 TYPE_PRECISION (fpreg_type) = 82;
10352 layout_type (fpreg_type);
10353 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10355 /* The __float80 type. */
10356 if (float64x_type_node != NULL_TREE
10357 && TYPE_MODE (float64x_type_node) == XFmode)
10358 float80_type = float64x_type_node;
10359 else
10361 float80_type = make_node (REAL_TYPE);
10362 TYPE_PRECISION (float80_type) = 80;
10363 layout_type (float80_type);
10365 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10367 /* The __float128 type. */
10368 if (!TARGET_HPUX)
10370 tree ftype;
10371 tree const_string_type
10372 = build_pointer_type (build_qualified_type
10373 (char_type_node, TYPE_QUAL_CONST));
10375 (*lang_hooks.types.register_builtin_type) (float128_type_node,
10376 "__float128");
10378 /* TFmode support builtins. */
10379 ftype = build_function_type_list (float128_type_node, NULL_TREE);
10380 decl = add_builtin_function ("__builtin_infq", ftype,
10381 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10382 NULL, NULL_TREE);
10383 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10385 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10386 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10387 NULL, NULL_TREE);
10388 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10390 ftype = build_function_type_list (float128_type_node,
10391 const_string_type,
10392 NULL_TREE);
10393 decl = add_builtin_function ("__builtin_nanq", ftype,
10394 IA64_BUILTIN_NANQ, BUILT_IN_MD,
10395 "nanq", NULL_TREE);
10396 TREE_READONLY (decl) = 1;
10397 ia64_builtins[IA64_BUILTIN_NANQ] = decl;
10399 decl = add_builtin_function ("__builtin_nansq", ftype,
10400 IA64_BUILTIN_NANSQ, BUILT_IN_MD,
10401 "nansq", NULL_TREE);
10402 TREE_READONLY (decl) = 1;
10403 ia64_builtins[IA64_BUILTIN_NANSQ] = decl;
10405 ftype = build_function_type_list (float128_type_node,
10406 float128_type_node,
10407 NULL_TREE);
10408 decl = add_builtin_function ("__builtin_fabsq", ftype,
10409 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10410 "__fabstf2", NULL_TREE);
10411 TREE_READONLY (decl) = 1;
10412 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10414 ftype = build_function_type_list (float128_type_node,
10415 float128_type_node,
10416 float128_type_node,
10417 NULL_TREE);
10418 decl = add_builtin_function ("__builtin_copysignq", ftype,
10419 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10420 "__copysigntf3", NULL_TREE);
10421 TREE_READONLY (decl) = 1;
10422 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10424 else
10425 /* Under HPUX, this is a synonym for "long double". */
10426 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10427 "__float128");
10429 /* Fwrite on VMS is non-standard. */
10430 #if TARGET_ABI_OPEN_VMS
10431 vms_patch_builtins ();
10432 #endif
10434 #define def_builtin(name, type, code) \
10435 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10436 NULL, NULL_TREE)
10438 decl = def_builtin ("__builtin_ia64_bsp",
10439 build_function_type_list (ptr_type_node, NULL_TREE),
10440 IA64_BUILTIN_BSP);
10441 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10443 decl = def_builtin ("__builtin_ia64_flushrs",
10444 build_function_type_list (void_type_node, NULL_TREE),
10445 IA64_BUILTIN_FLUSHRS);
10446 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10448 #undef def_builtin
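/* Illustrative usage (hedged, not from the original sources):
       void *bsp = __builtin_ia64_bsp ();
       __builtin_ia64_flushrs ();
   The first expands via gen_bsp_value and exposes the register stack
   engine's backing store pointer; the second expands via gen_flushrs and
   forces dirty stacked registers out to the backing store, which unwinders
   and garbage collectors typically need.  */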
10450 if (TARGET_HPUX)
10452 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
10453 set_user_assembler_name (decl, "_Isfinite");
10454 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
10455 set_user_assembler_name (decl, "_Isfinitef");
10456 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
10457 set_user_assembler_name (decl, "_Isfinitef128");
10461 static tree
10462 ia64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
10463 tree *args, bool ignore ATTRIBUTE_UNUSED)
10465 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
10467 enum ia64_builtins fn_code = (enum ia64_builtins)
10468 DECL_FUNCTION_CODE (fndecl);
10469 switch (fn_code)
10471 case IA64_BUILTIN_NANQ:
10472 case IA64_BUILTIN_NANSQ:
10474 tree type = TREE_TYPE (TREE_TYPE (fndecl));
10475 const char *str = c_getstr (*args);
10476 int quiet = fn_code == IA64_BUILTIN_NANQ;
10477 REAL_VALUE_TYPE real;
10479 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
10480 return build_real (type, real);
10481 return NULL_TREE;
10484 default:
10485 break;
10489 #ifdef SUBTARGET_FOLD_BUILTIN
10490 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
10491 #endif
10493 return NULL_TREE;
10497 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10498 machine_mode mode ATTRIBUTE_UNUSED,
10499 int ignore ATTRIBUTE_UNUSED)
10501 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10502 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10504 switch (fcode)
10506 case IA64_BUILTIN_BSP:
10507 if (! target || ! register_operand (target, DImode))
10508 target = gen_reg_rtx (DImode);
10509 emit_insn (gen_bsp_value (target));
10510 #ifdef POINTERS_EXTEND_UNSIGNED
10511 target = convert_memory_address (ptr_mode, target);
10512 #endif
10513 return target;
10515 case IA64_BUILTIN_FLUSHRS:
10516 emit_insn (gen_flushrs ());
10517 return const0_rtx;
10519 case IA64_BUILTIN_INFQ:
10520 case IA64_BUILTIN_HUGE_VALQ:
10522 machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10523 REAL_VALUE_TYPE inf;
10524 rtx tmp;
10526 real_inf (&inf);
10527 tmp = const_double_from_real_value (inf, target_mode);
10529 tmp = validize_mem (force_const_mem (target_mode, tmp));
10531 if (target == 0)
10532 target = gen_reg_rtx (target_mode);
10534 emit_move_insn (target, tmp);
10535 return target;
10538 case IA64_BUILTIN_NANQ:
10539 case IA64_BUILTIN_NANSQ:
10540 case IA64_BUILTIN_FABSQ:
10541 case IA64_BUILTIN_COPYSIGNQ:
10542 return expand_call (exp, target, ignore);
10544 default:
10545 gcc_unreachable ();
10548 return NULL_RTX;
10551 /* Return the ia64 builtin for CODE. */
10553 static tree
10554 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10556 if (code >= IA64_BUILTIN_max)
10557 return error_mark_node;
10559 return ia64_builtins[code];
10562 /* On HP-UX IA64, aggregate parameters are passed stored in the
10563 most significant bits of the stack slot. */
10565 enum direction
10566 ia64_hpux_function_arg_padding (machine_mode mode, const_tree type)
10568 /* Exception to normal case for structures/unions/etc. */
10570 if (type && AGGREGATE_TYPE_P (type)
10571 && int_size_in_bytes (type) < UNITS_PER_WORD)
10572 return upward;
10574 /* Fall back to the default. */
10575 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10578 /* Emit text to declare externally defined variables and functions, because
10579 the Intel assembler does not support undefined externals. */
10581 void
10582 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10584 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10585 set in order to avoid putting out names that are never really
10586 used. */
10587 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10589 /* maybe_assemble_visibility will return 1 if the assembler
10590 visibility directive is output. */
10591 int need_visibility = ((*targetm.binds_local_p) (decl)
10592 && maybe_assemble_visibility (decl));
10594 /* GNU as does not need anything here, but the HP linker does
10595 need something for external functions. */
10596 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10597 && TREE_CODE (decl) == FUNCTION_DECL)
10598 (*targetm.asm_out.globalize_decl_name) (file, decl);
10599 else if (need_visibility && !TARGET_GNU_AS)
10600 (*targetm.asm_out.globalize_label) (file, name);
10604 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
10605 modes of word_mode and larger. Rename the TFmode libfuncs using the
10606 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
10607 backward compatibility. */
10609 static void
10610 ia64_init_libfuncs (void)
10612 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10613 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10614 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10615 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10617 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10618 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10619 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10620 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10621 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10623 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10624 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10625 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10626 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10627 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10628 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10630 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10631 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10632 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10633 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10634 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10636 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10637 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10638 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10639 /* HP-UX 11.23 libc does not have a function for unsigned
10640 SImode-to-TFmode conversion. */
10641 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10644 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10646 static void
10647 ia64_hpux_init_libfuncs (void)
10649 ia64_init_libfuncs ();
10651 /* The HP SI millicode division and mod functions expect DI arguments.
10652 By turning them off completely we avoid using both libgcc and the
10653 non-standard millicode routines and use the HP DI millicode routines
10654 instead. */
10656 set_optab_libfunc (sdiv_optab, SImode, 0);
10657 set_optab_libfunc (udiv_optab, SImode, 0);
10658 set_optab_libfunc (smod_optab, SImode, 0);
10659 set_optab_libfunc (umod_optab, SImode, 0);
10661 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10662 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10663 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10664 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10666 /* HP-UX libc has TF min/max/abs routines in it. */
10667 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10668 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10669 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10671 /* ia64_expand_compare uses this. */
10672 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10674 /* These should never be used. */
10675 set_optab_libfunc (eq_optab, TFmode, 0);
10676 set_optab_libfunc (ne_optab, TFmode, 0);
10677 set_optab_libfunc (gt_optab, TFmode, 0);
10678 set_optab_libfunc (ge_optab, TFmode, 0);
10679 set_optab_libfunc (lt_optab, TFmode, 0);
10680 set_optab_libfunc (le_optab, TFmode, 0);
10683 /* Rename the division and modulus functions in VMS. */
10685 static void
10686 ia64_vms_init_libfuncs (void)
10688 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10689 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10690 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10691 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10692 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10693 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10694 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10695 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10696 #ifdef MEM_LIBFUNCS_INIT
10697 MEM_LIBFUNCS_INIT;
10698 #endif
10701 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10702 the HPUX conventions. */
10704 static void
10705 ia64_sysv4_init_libfuncs (void)
10707 ia64_init_libfuncs ();
10709 /* These functions are not part of the HPUX TFmode interface. We
10710 use them instead of _U_Qfcmp, which doesn't work the way we
10711 expect. */
10712 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10713 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10714 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10715 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10716 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10717 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10719 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10720 glibc doesn't have them. */
10723 /* Use soft-fp. */
10725 static void
10726 ia64_soft_fp_init_libfuncs (void)
10730 static bool
10731 ia64_vms_valid_pointer_mode (machine_mode mode)
10733 return (mode == SImode || mode == DImode);
10736 /* For HPUX, it is illegal to have relocations in shared segments. */
10738 static int
10739 ia64_hpux_reloc_rw_mask (void)
10741 return 3;
10744 /* For others, relax this so that relocations to local data go in
10745 read-only segments, but we still cannot allow global relocations
10746 in read-only segments. */
10748 static int
10749 ia64_reloc_rw_mask (void)
10751 return flag_pic ? 3 : 2;
10754 /* Return the section to use for X. The only special thing we do here
10755 is to honor small data. */
10757 static section *
10758 ia64_select_rtx_section (machine_mode mode, rtx x,
10759 unsigned HOST_WIDE_INT align)
10761 if (GET_MODE_SIZE (mode) > 0
10762 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10763 && !TARGET_NO_SDATA)
10764 return sdata_section;
10765 else
10766 return default_elf_select_rtx_section (mode, x, align);
10769 static unsigned int
10770 ia64_section_type_flags (tree decl, const char *name, int reloc)
10772 unsigned int flags = 0;
10774 if (strcmp (name, ".sdata") == 0
10775 || strncmp (name, ".sdata.", 7) == 0
10776 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10777 || strncmp (name, ".sdata2.", 8) == 0
10778 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10779 || strcmp (name, ".sbss") == 0
10780 || strncmp (name, ".sbss.", 6) == 0
10781 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10782 flags = SECTION_SMALL;
10784 flags |= default_section_type_flags (decl, name, reloc);
10785 return flags;
10788 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10789 structure type and that the address of that type should be passed
10790 in out0, rather than in r8. */
10792 static bool
10793 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10795 tree ret_type = TREE_TYPE (fntype);
10797 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10798 as the structure return address parameter, if the return value
10799 type has a non-trivial copy constructor or destructor. It is not
10800 clear if this same convention should be used for other
10801 programming languages. Until G++ 3.4, we incorrectly used r8 for
10802 these return values. */
10803 return (abi_version_at_least (2)
10804 && ret_type
10805 && TYPE_MODE (ret_type) == BLKmode
10806 && TREE_ADDRESSABLE (ret_type)
10807 && lang_GNU_CXX ());
10810 /* Output the assembler code for a thunk function. THUNK_DECL is the
10811 declaration for the thunk function itself, FUNCTION is the decl for
10812 the target function. DELTA is an immediate constant offset to be
10813 added to THIS. If VCALL_OFFSET is nonzero, the word at
10814 *(*this + vcall_offset) should be added to THIS. */
10816 static void
10817 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10818 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10819 tree function)
10821 rtx this_rtx, funexp;
10822 rtx_insn *insn;
10823 unsigned int this_parmno;
10824 unsigned int this_regno;
10825 rtx delta_rtx;
10827 reload_completed = 1;
10828 epilogue_completed = 1;
10830 /* Set things up as ia64_expand_prologue might. */
10831 last_scratch_gr_reg = 15;
10833 memset (&current_frame_info, 0, sizeof (current_frame_info));
10834 current_frame_info.spill_cfa_off = -16;
10835 current_frame_info.n_input_regs = 1;
10836 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10838 /* Mark the end of the (empty) prologue. */
10839 emit_note (NOTE_INSN_PROLOGUE_END);
10841 /* Figure out whether "this" will be the first parameter (the
10842 typical case) or the second parameter (as happens when the
10843 virtual function returns certain class objects). */
10844 this_parmno
10845 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10846 ? 1 : 0);
10847 this_regno = IN_REG (this_parmno);
10848 if (!TARGET_REG_NAMES)
10849 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10851 this_rtx = gen_rtx_REG (Pmode, this_regno);
10853 /* Apply the constant offset, if required. */
10854 delta_rtx = GEN_INT (delta);
10855 if (TARGET_ILP32)
10857 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10858 REG_POINTER (tmp) = 1;
10859 if (delta && satisfies_constraint_I (delta_rtx))
10861 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10862 delta = 0;
10864 else
10865 emit_insn (gen_ptr_extend (this_rtx, tmp));
10867 if (delta)
10869 if (!satisfies_constraint_I (delta_rtx))
10871 rtx tmp = gen_rtx_REG (Pmode, 2);
10872 emit_move_insn (tmp, delta_rtx);
10873 delta_rtx = tmp;
10875 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10878 /* Apply the offset from the vtable, if required. */
10879 if (vcall_offset)
10881 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10882 rtx tmp = gen_rtx_REG (Pmode, 2);
10884 if (TARGET_ILP32)
10886 rtx t = gen_rtx_REG (ptr_mode, 2);
10887 REG_POINTER (t) = 1;
10888 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10889 if (satisfies_constraint_I (vcall_offset_rtx))
10891 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10892 vcall_offset = 0;
10894 else
10895 emit_insn (gen_ptr_extend (tmp, t));
10897 else
10898 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10900 if (vcall_offset)
10902 if (!satisfies_constraint_J (vcall_offset_rtx))
10904 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10905 emit_move_insn (tmp2, vcall_offset_rtx);
10906 vcall_offset_rtx = tmp2;
10908 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10911 if (TARGET_ILP32)
10912 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10913 else
10914 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10916 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10919 /* Generate a tail call to the target function. */
10920 if (! TREE_USED (function))
10922 assemble_external (function);
10923 TREE_USED (function) = 1;
10925 funexp = XEXP (DECL_RTL (function), 0);
10926 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10927 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10928 insn = get_last_insn ();
10929 SIBLING_CALL_P (insn) = 1;
10931 /* Code generation for calls relies on splitting. */
10932 reload_completed = 1;
10933 epilogue_completed = 1;
10934 try_split (PATTERN (insn), insn, 0);
10936 emit_barrier ();
10938 /* Run just enough of rest_of_compilation to get the insns emitted.
10939 There's not really enough bulk here to make other passes such as
10940 instruction scheduling worth while. Note that use_thunk calls
10941 assemble_start_function and assemble_end_function. */
10943 emit_all_insn_group_barriers (NULL);
10944 insn = get_insns ();
10945 shorten_branches (insn);
10946 final_start_function (insn, file, 1);
10947 final (insn, file, 1);
10948 final_end_function ();
10950 reload_completed = 0;
10951 epilogue_completed = 0;
10954 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10956 static rtx
10957 ia64_struct_value_rtx (tree fntype,
10958 int incoming ATTRIBUTE_UNUSED)
10960 if (TARGET_ABI_OPEN_VMS ||
10961 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10962 return NULL_RTX;
10963 return gen_rtx_REG (Pmode, GR_REG (8));
10966 static bool
10967 ia64_scalar_mode_supported_p (machine_mode mode)
10969 switch (mode)
10971 case E_QImode:
10972 case E_HImode:
10973 case E_SImode:
10974 case E_DImode:
10975 case E_TImode:
10976 return true;
10978 case E_SFmode:
10979 case E_DFmode:
10980 case E_XFmode:
10981 case E_RFmode:
10982 return true;
10984 case E_TFmode:
10985 return true;
10987 default:
10988 return false;
10992 static bool
10993 ia64_vector_mode_supported_p (machine_mode mode)
10995 switch (mode)
10997 case E_V8QImode:
10998 case E_V4HImode:
10999 case E_V2SImode:
11000 return true;
11002 case E_V2SFmode:
11003 return true;
11005 default:
11006 return false;
11010 /* Implement the FUNCTION_PROFILER macro. */
11012 void
11013 ia64_output_function_profiler (FILE *file, int labelno)
11015 bool indirect_call;
11017 /* If the function needs a static chain and the static chain
11018 register is r15, we use an indirect call so as to bypass
11019 the PLT stub in case the executable is dynamically linked,
11020 because the stub clobbers r15 as per 5.3.6 of the psABI.
11021 We don't need to do that in non-canonical PIC mode. */
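/* As emitted below, _mcount receives: out0 = the caller's ar.pfs (from the
   alloc), out1 = r1 (the gp), out2 = b0 (the return address), and out3 =
   the address of the per-call-site counter label, or r0 when
   NO_PROFILE_COUNTERS.  */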
11023 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
11025 gcc_assert (STATIC_CHAIN_REGNUM == 15);
11026 indirect_call = true;
11028 else
11029 indirect_call = false;
11031 if (TARGET_GNU_AS)
11032 fputs ("\t.prologue 4, r40\n", file);
11033 else
11034 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
11035 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
11037 if (NO_PROFILE_COUNTERS)
11038 fputs ("\tmov out3 = r0\n", file);
11039 else
11041 char buf[20];
11042 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11044 if (TARGET_AUTO_PIC)
11045 fputs ("\tmovl out3 = @gprel(", file);
11046 else
11047 fputs ("\taddl out3 = @ltoff(", file);
11048 assemble_name (file, buf);
11049 if (TARGET_AUTO_PIC)
11050 fputs (")\n", file);
11051 else
11052 fputs ("), r1\n", file);
11055 if (indirect_call)
11056 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
11057 fputs ("\t;;\n", file);
11059 fputs ("\t.save rp, r42\n", file);
11060 fputs ("\tmov out2 = b0\n", file);
11061 if (indirect_call)
11062 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
11063 fputs ("\t.body\n", file);
11064 fputs ("\tmov out1 = r1\n", file);
11065 if (indirect_call)
11067 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
11068 fputs ("\tmov b6 = r16\n", file);
11069 fputs ("\tld8 r1 = [r14]\n", file);
11070 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
11072 else
11073 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
11076 static GTY(()) rtx mcount_func_rtx;
11077 static rtx
11078 gen_mcount_func_rtx (void)
11080 if (!mcount_func_rtx)
11081 mcount_func_rtx = init_one_libfunc ("_mcount");
11082 return mcount_func_rtx;
11085 void
11086 ia64_profile_hook (int labelno)
11088 rtx label, ip;
11090 if (NO_PROFILE_COUNTERS)
11091 label = const0_rtx;
11092 else
11094 char buf[30];
11095 const char *label_name;
11096 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11097 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
11098 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
11099 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
11101 ip = gen_reg_rtx (Pmode);
11102 emit_insn (gen_ip_value (ip));
11103 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
11104 VOIDmode, 3,
11105 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
11106 ip, Pmode,
11107 label, Pmode);
11110 /* Return the mangling of TYPE if it is an extended fundamental type. */
11112 static const char *
11113 ia64_mangle_type (const_tree type)
11115 type = TYPE_MAIN_VARIANT (type);
11117 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
11118 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
11119 return NULL;
11121 /* On HP-UX, "long double" is mangled as "e" so __float128 is
11122 mangled as "e". */
11123 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
11124 return "g";
11125 /* On HP-UX, "e" is not available as a mangling of __float80 so use
11126 an extended mangling. Elsewhere, "e" is available since long
11127 double is 80 bits. */
11128 if (TYPE_MODE (type) == XFmode)
11129 return TARGET_HPUX ? "u9__float80" : "e";
11130 if (TYPE_MODE (type) == RFmode)
11131 return "u7__fpreg";
11132 return NULL;
11135 /* Return the diagnostic message string if conversion from FROMTYPE to
11136 TOTYPE is not allowed, NULL otherwise. */
11137 static const char *
11138 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
11140 /* Reject nontrivial conversion to or from __fpreg. */
11141 if (TYPE_MODE (fromtype) == RFmode
11142 && TYPE_MODE (totype) != RFmode
11143 && TYPE_MODE (totype) != VOIDmode)
11144 return N_("invalid conversion from %<__fpreg%>");
11145 if (TYPE_MODE (totype) == RFmode
11146 && TYPE_MODE (fromtype) != RFmode)
11147 return N_("invalid conversion to %<__fpreg%>");
11148 return NULL;
11151 /* Return the diagnostic message string if the unary operation OP is
11152 not permitted on TYPE, NULL otherwise. */
11153 static const char *
11154 ia64_invalid_unary_op (int op, const_tree type)
11156 /* Reject operations on __fpreg other than unary + or &. */
11157 if (TYPE_MODE (type) == RFmode
11158 && op != CONVERT_EXPR
11159 && op != ADDR_EXPR)
11160 return N_("invalid operation on %<__fpreg%>");
11161 return NULL;
11164 /* Return the diagnostic message string if the binary operation OP is
11165 not permitted on TYPE1 and TYPE2, NULL otherwise. */
11166 static const char *
11167 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
11169 /* Reject operations on __fpreg. */
11170 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
11171 return N_("invalid operation on %<__fpreg%>");
11172 return NULL;
11175 /* HP-UX version_id attribute.
11176 For object foo, if the version_id is set to 1234, put out an alias
11177 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
11178 other than an alias statement because it is an illegal symbol name. */
11180 static tree
11181 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
11182 tree name ATTRIBUTE_UNUSED,
11183 tree args,
11184 int flags ATTRIBUTE_UNUSED,
11185 bool *no_add_attrs)
11187 tree arg = TREE_VALUE (args);
11189 if (TREE_CODE (arg) != STRING_CST)
11191 error ("version attribute is not a string");
11192 *no_add_attrs = true;
11193 return NULL_TREE;
11195 return NULL_TREE;
11198 /* Target hook for c_mode_for_suffix. */
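/* For example, with this hook a literal such as 1.0q gets TFmode (the
   __float128 type registered in ia64_init_builtins) and 1.0w gets XFmode
   (the __float80 type).  */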
11200 static machine_mode
11201 ia64_c_mode_for_suffix (char suffix)
11203 if (suffix == 'q')
11204 return TFmode;
11205 if (suffix == 'w')
11206 return XFmode;
11208 return VOIDmode;
11211 static GTY(()) rtx ia64_dconst_0_5_rtx;
11214 ia64_dconst_0_5 (void)
11216 if (! ia64_dconst_0_5_rtx)
11218 REAL_VALUE_TYPE rv;
11219 real_from_string (&rv, "0.5");
11220 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11222 return ia64_dconst_0_5_rtx;
11225 static GTY(()) rtx ia64_dconst_0_375_rtx;
11228 ia64_dconst_0_375 (void)
11230 if (! ia64_dconst_0_375_rtx)
11232 REAL_VALUE_TYPE rv;
11233 real_from_string (&rv, "0.375");
11234 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11236 return ia64_dconst_0_375_rtx;
11239 static machine_mode
11240 ia64_get_reg_raw_mode (int regno)
11242 if (FR_REGNO_P (regno))
11243 return XFmode;
11244 return default_get_reg_raw_mode(regno);
11247 /* Implement TARGET_MEMBER_TYPE_FORCES_BLK. ??? Might not be needed
11248 anymore. */
11250 bool
11251 ia64_member_type_forces_blk (const_tree, machine_mode mode)
11253 return TARGET_HPUX && mode == TFmode;
11256 /* Always default to .text section until HP-UX linker is fixed. */
11258 ATTRIBUTE_UNUSED static section *
11259 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11260 enum node_frequency freq ATTRIBUTE_UNUSED,
11261 bool startup ATTRIBUTE_UNUSED,
11262 bool exit ATTRIBUTE_UNUSED)
11264 return NULL;
11267 /* Construct (set target (vec_select op0 (parallel perm))) and
11268 return true if that's a valid instruction in the active ISA. */
11270 static bool
11271 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
11273 rtx rperm[MAX_VECT_LEN], x;
11274 unsigned i;
11276 for (i = 0; i < nelt; ++i)
11277 rperm[i] = GEN_INT (perm[i]);
11279 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
11280 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
11281 x = gen_rtx_SET (target, x);
11283 rtx_insn *insn = emit_insn (x);
11284 if (recog_memoized (insn) < 0)
11286 remove_insn (insn);
11287 return false;
11289 return true;
11292 /* Similar, but generate a vec_concat from op0 and op1 as well. */
11294 static bool
11295 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
11296 const unsigned char *perm, unsigned nelt)
11298 machine_mode v2mode;
11299 rtx x;
11301 if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode))
11302 return false;
11303 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
11304 return expand_vselect (target, x, perm, nelt);
11307 /* Try to expand a no-op permutation. */
11309 static bool
11310 expand_vec_perm_identity (struct expand_vec_perm_d *d)
11312 unsigned i, nelt = d->nelt;
11314 for (i = 0; i < nelt; ++i)
11315 if (d->perm[i] != i)
11316 return false;
11318 if (!d->testing_p)
11319 emit_move_insn (d->target, d->op0);
11321 return true;
11324 /* Try to expand D via a shrp instruction. */
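/* Worked example (derived from the code below): for V8QImode with
   d->perm = { 3, 4, 5, 6, 7, 8, 9, 10 }, shift is 3, op1:op0 is treated as
   one 128-bit value, and in the little-endian case a single shrp with a
   24-bit count (3 elements of 8 bits) extracts the wanted 8 bytes.  */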
11326 static bool
11327 expand_vec_perm_shrp (struct expand_vec_perm_d *d)
11329 unsigned i, nelt = d->nelt, shift, mask;
11330 rtx tmp, hi, lo;
11332 /* ??? Don't force V2SFmode into the integer registers. */
11333 if (d->vmode == V2SFmode)
11334 return false;
11336 mask = (d->one_operand_p ? nelt - 1 : 2 * nelt - 1);
11338 shift = d->perm[0];
11339 if (BYTES_BIG_ENDIAN && shift > nelt)
11340 return false;
11342 for (i = 1; i < nelt; ++i)
11343 if (d->perm[i] != ((shift + i) & mask))
11344 return false;
11346 if (d->testing_p)
11347 return true;
11349 hi = shift < nelt ? d->op1 : d->op0;
11350 lo = shift < nelt ? d->op0 : d->op1;
11352 shift %= nelt;
11354 shift *= GET_MODE_UNIT_SIZE (d->vmode) * BITS_PER_UNIT;
11356 /* We've eliminated the shift 0 case via expand_vec_perm_identity. */
11357 gcc_assert (IN_RANGE (shift, 1, 63));
11359 /* Recall that big-endian elements are numbered starting at the top of
11360 the register. Ideally we'd have a shift-left-pair. But since we
11361 don't, convert to a shift the other direction. */
11362 if (BYTES_BIG_ENDIAN)
11363 shift = 64 - shift;
11365 tmp = gen_reg_rtx (DImode);
11366 hi = gen_lowpart (DImode, hi);
11367 lo = gen_lowpart (DImode, lo);
11368 emit_insn (gen_shrp (tmp, hi, lo, GEN_INT (shift)));
11370 emit_move_insn (d->target, gen_lowpart (d->vmode, tmp));
11371 return true;
11374 /* Try to instantiate D in a single instruction. */
11376 static bool
11377 expand_vec_perm_1 (struct expand_vec_perm_d *d)
11379 unsigned i, nelt = d->nelt;
11380 unsigned char perm2[MAX_VECT_LEN];
11382 /* Try single-operand selections. */
11383 if (d->one_operand_p)
11385 if (expand_vec_perm_identity (d))
11386 return true;
11387 if (expand_vselect (d->target, d->op0, d->perm, nelt))
11388 return true;
11391 /* Try two operand selections. */
11392 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
11393 return true;
11395 /* Recognize interleave style patterns with reversed operands. */
11396 if (!d->one_operand_p)
11398 for (i = 0; i < nelt; ++i)
11400 unsigned e = d->perm[i];
11401 if (e >= nelt)
11402 e -= nelt;
11403 else
11404 e += nelt;
11405 perm2[i] = e;
11408 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
11409 return true;
11412 if (expand_vec_perm_shrp (d))
11413 return true;
11415 /* ??? Look for deposit-like permutations where most of the result
11416 comes from one vector unchanged and the rest comes from a
11417 sequential hunk of the other vector. */
11419 return false;
11422 /* Pattern match broadcast permutations. */
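/* For example (a hedged sketch), a V8QImode permutation selecting element 5
   in every lane is handled below by extracting byte 5 of op0 with extzv and
   replicating it across the vector with the mux1 broadcast form.  */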
11424 static bool
11425 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
11427 unsigned i, elt, nelt = d->nelt;
11428 unsigned char perm2[2];
11429 rtx temp;
11430 bool ok;
11432 if (!d->one_operand_p)
11433 return false;
11435 elt = d->perm[0];
11436 for (i = 1; i < nelt; ++i)
11437 if (d->perm[i] != elt)
11438 return false;
11440 switch (d->vmode)
11442 case E_V2SImode:
11443 case E_V2SFmode:
11444 /* Implementable by interleave. */
11445 perm2[0] = elt;
11446 perm2[1] = elt + 2;
11447 ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
11448 gcc_assert (ok);
11449 break;
11451 case E_V8QImode:
11452 /* Implementable by extract + broadcast. */
11453 if (BYTES_BIG_ENDIAN)
11454 elt = 7 - elt;
11455 elt *= BITS_PER_UNIT;
11456 temp = gen_reg_rtx (DImode);
11457 emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
11458 GEN_INT (8), GEN_INT (elt)));
11459 emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
11460 break;
11462 case E_V4HImode:
11463 /* Should have been matched directly by vec_select. */
11464 default:
11465 gcc_unreachable ();
11468 return true;
11471 /* A subroutine of ia64_expand_vec_perm_const_1. Try to simplify a
11472 two vector permutation into a single vector permutation by using
11473 an interleave operation to merge the vectors. */
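/* Worked example (derived from the code below): for V4HImode with
   d->perm = { 0, 4, 1, 5 }, contents is exactly h0 | h2, dremap becomes the
   even-halves interleave of op0 and op1, and the final single-operand remap
   degenerates to the identity permutation.  */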
11475 static bool
11476 expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
11478 struct expand_vec_perm_d dremap, dfinal;
11479 unsigned char remap[2 * MAX_VECT_LEN];
11480 unsigned contents, i, nelt, nelt2;
11481 unsigned h0, h1, h2, h3;
11482 rtx_insn *seq;
11483 bool ok;
11485 if (d->one_operand_p)
11486 return false;
11488 nelt = d->nelt;
11489 nelt2 = nelt / 2;
11491 /* Examine from whence the elements come. */
11492 contents = 0;
11493 for (i = 0; i < nelt; ++i)
11494 contents |= 1u << d->perm[i];
11496 memset (remap, 0xff, sizeof (remap));
11497 dremap = *d;
11499 h0 = (1u << nelt2) - 1;
11500 h1 = h0 << nelt2;
11501 h2 = h0 << nelt;
11502 h3 = h0 << (nelt + nelt2);
11504 if ((contents & (h0 | h2)) == contents) /* punpck even halves */
11506 for (i = 0; i < nelt; ++i)
11508 unsigned which = i / 2 + (i & 1 ? nelt : 0);
11509 remap[which] = i;
11510 dremap.perm[i] = which;
11513 else if ((contents & (h1 | h3)) == contents) /* punpck odd halves */
11515 for (i = 0; i < nelt; ++i)
11517 unsigned which = i / 2 + nelt2 + (i & 1 ? nelt : 0);
11518 remap[which] = i;
11519 dremap.perm[i] = which;
11522 else if ((contents & 0x5555) == contents) /* mix even elements */
11524 for (i = 0; i < nelt; ++i)
11526 unsigned which = (i & ~1) + (i & 1 ? nelt : 0);
11527 remap[which] = i;
11528 dremap.perm[i] = which;
11531 else if ((contents & 0xaaaa) == contents) /* mix odd elements */
11533 for (i = 0; i < nelt; ++i)
11535 unsigned which = (i | 1) + (i & 1 ? nelt : 0);
11536 remap[which] = i;
11537 dremap.perm[i] = which;
11540 else if (floor_log2 (contents) - ctz_hwi (contents) < (int)nelt) /* shrp */
11542 unsigned shift = ctz_hwi (contents);
11543 for (i = 0; i < nelt; ++i)
11545 unsigned which = (i + shift) & (2 * nelt - 1);
11546 remap[which] = i;
11547 dremap.perm[i] = which;
11550 else
11551 return false;
11553 /* Use the remapping array set up above to move the elements from their
11554 swizzled locations into their final destinations. */
11555 dfinal = *d;
11556 for (i = 0; i < nelt; ++i)
11558 unsigned e = remap[d->perm[i]];
11559 gcc_assert (e < nelt);
11560 dfinal.perm[i] = e;
11562 if (d->testing_p)
11563 dfinal.op0 = gen_raw_REG (dfinal.vmode, LAST_VIRTUAL_REGISTER + 1);
11564 else
11565 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
11566 dfinal.op1 = dfinal.op0;
11567 dfinal.one_operand_p = true;
11568 dremap.target = dfinal.op0;
11570 /* Test if the final remap can be done with a single insn. For V4HImode
11571 this *will* succeed. For V8QImode or V2SImode it may not. */
11572 start_sequence ();
11573 ok = expand_vec_perm_1 (&dfinal);
11574 seq = get_insns ();
11575 end_sequence ();
11576 if (!ok)
11577 return false;
11578 if (d->testing_p)
11579 return true;
11581 ok = expand_vec_perm_1 (&dremap);
11582 gcc_assert (ok);
11584 emit_insn (seq);
11585 return true;
11588 /* A subroutine of ia64_expand_vec_perm_const_1. Emit a full V4HImode
11589 constant permutation via two mux2 and a merge. */
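/* In other words: each lane index is reduced modulo 4 and both operands are
   permuted with that index vector (two mux2-style vec_selects); a constant
   per-lane mask then selects whether the result lane comes from op0 or op1,
   via (t0 & mask) | (t1 & ~mask).  */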
11591 static bool
11592 expand_vec_perm_v4hi_5 (struct expand_vec_perm_d *d)
11594 unsigned char perm2[4];
11595 rtx rmask[4];
11596 unsigned i;
11597 rtx t0, t1, mask, x;
11598 bool ok;
11600 if (d->vmode != V4HImode || d->one_operand_p)
11601 return false;
11602 if (d->testing_p)
11603 return true;
11605 for (i = 0; i < 4; ++i)
11607 perm2[i] = d->perm[i] & 3;
11608 rmask[i] = (d->perm[i] & 4 ? const0_rtx : constm1_rtx);
  mask = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmask));
  mask = force_reg (V4HImode, mask);

  t0 = gen_reg_rtx (V4HImode);
  t1 = gen_reg_rtx (V4HImode);

  ok = expand_vselect (t0, d->op0, perm2, 4);
  gcc_assert (ok);
  ok = expand_vselect (t1, d->op1, perm2, 4);
  gcc_assert (ok);

  x = gen_rtx_AND (V4HImode, mask, t0);
  emit_insn (gen_rtx_SET (t0, x));

  x = gen_rtx_NOT (V4HImode, mask);
  x = gen_rtx_AND (V4HImode, x, t1);
  emit_insn (gen_rtx_SET (t1, x));

  x = gen_rtx_IOR (V4HImode, t0, t1);
  emit_insn (gen_rtx_SET (d->target, x));

  return true;
}

/* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
   With all of the interface bits taken care of, perform the expansion
   in D and return true on success.  */

static bool
ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  if (expand_vec_perm_1 (d))
    return true;
  if (expand_vec_perm_broadcast (d))
    return true;
  if (expand_vec_perm_interleave_2 (d))
    return true;
  if (expand_vec_perm_v4hi_5 (d))
    return true;
  return false;
}
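
/* Expand a constant permutation of OPERANDS[1] and OPERANDS[2] into
   OPERANDS[0], using the CONST_VECTOR selector OPERANDS[3].  Returns true
   if an insn sequence implementing the permutation was emitted.  */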
bool
ia64_expand_vec_perm_const (rtx operands[4])
{
  struct expand_vec_perm_d d;
  unsigned char perm[MAX_VECT_LEN];
  int i, nelt, which;
  rtx sel;

  d.target = operands[0];
  d.op0 = operands[1];
  d.op1 = operands[2];
  sel = operands[3];

  d.vmode = GET_MODE (d.target);
  gcc_assert (VECTOR_MODE_P (d.vmode));
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  gcc_assert (GET_CODE (sel) == CONST_VECTOR);
  gcc_assert (XVECLEN (sel, 0) == nelt);
  gcc_checking_assert (sizeof (d.perm) == sizeof (perm));

  for (i = which = 0; i < nelt; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      int ei = INTVAL (e) & (2 * nelt - 1);

      which |= (ei < nelt ? 1 : 2);
      d.perm[i] = ei;
      perm[i] = ei;
    }
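
  /* WHICH is now 1 if the selector references only OP0's elements
     (indices < nelt), 2 if it references only OP1's, and 3 if both.  */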
  switch (which)
    {
    default:
      gcc_unreachable();

    case 3:
      if (!rtx_equal_p (d.op0, d.op1))
        {
          d.one_operand_p = false;
          break;
        }

      /* The elements of PERM do not suggest that only the first operand
         is used, but both operands are identical.  Allow easier matching
         of the permutation by folding the permutation into the single
         input vector.  */
      for (i = 0; i < nelt; ++i)
        if (d.perm[i] >= nelt)
          d.perm[i] -= nelt;
      /* FALLTHRU */

    case 1:
      d.op1 = d.op0;
      d.one_operand_p = true;
      break;

    case 2:
      for (i = 0; i < nelt; ++i)
        d.perm[i] -= nelt;
      d.op0 = d.op1;
      d.one_operand_p = true;
      break;
    }

  if (ia64_expand_vec_perm_const_1 (&d))
    return true;

  /* If the mask says both arguments are needed, but they are the same,
     the above tried to expand with one_operand_p true.  If that didn't
     work, retry with one_operand_p false, as that's what we used in _ok.  */
  if (which == 3 && d.one_operand_p)
    {
      memcpy (d.perm, perm, sizeof (perm));
      d.one_operand_p = false;
      return ia64_expand_vec_perm_const_1 (&d);
    }

  return false;
}

/* Implement targetm.vectorize.vec_perm_const_ok.  */

static bool
ia64_vectorize_vec_perm_const_ok (machine_mode vmode,
                                  const unsigned char *sel)
{
  struct expand_vec_perm_d d;
  unsigned int i, nelt, which;
  bool ret;

  d.vmode = vmode;
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;

  /* Extract the values from the vector CST into the permutation
     array in D.  */
  memcpy (d.perm, sel, nelt);
  for (i = which = 0; i < nelt; ++i)
    {
      unsigned char e = d.perm[i];
      gcc_assert (e < 2 * nelt);
      which |= (e < nelt ? 1 : 2);
    }

  /* For all elements from second vector, fold the elements to first.  */
  if (which == 2)
    for (i = 0; i < nelt; ++i)
      d.perm[i] -= nelt;

  /* Check whether the mask can be applied to the vector type.  */
  d.one_operand_p = (which != 3);

  /* Otherwise we have to go through the motions and see if we can
     figure out how to generate the requested permutation.  */
  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!d.one_operand_p)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
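
  /* The expansion attempt runs inside a scratch insn sequence that is
     immediately discarded; with testing_p set, only the boolean result
     matters.  */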
  start_sequence ();
  ret = ia64_expand_vec_perm_const_1 (&d);
  end_sequence ();

  return ret;
}
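
/* Set one element of a V2SFmode vector (the vec_setv2sf pattern): pack the
   new SFmode value OPERANDS[1] together with 0.0 into a scratch vector via
   fpack, then merge it with OPERANDS[0] by a two-operand constant
   permutation so that the element selected by OPERANDS[2] is replaced.  */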
void
ia64_expand_vec_setv2sf (rtx operands[3])
{
  struct expand_vec_perm_d d;
  unsigned int which;
  bool ok;

  d.target = operands[0];
  d.op0 = operands[0];
  d.op1 = gen_reg_rtx (V2SFmode);
  d.vmode = V2SFmode;
  d.nelt = 2;
  d.one_operand_p = false;
  d.testing_p = false;

  which = INTVAL (operands[2]);
  gcc_assert (which <= 1);
  d.perm[0] = 1 - which;
  d.perm[1] = which + 2;

  emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
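
/* Extract the even-numbered (ODD == 0) or odd-numbered (ODD == 1) elements
   of the concatenation of OP0 and OP1 into TARGET, i.e. TARGET element I
   is element 2*I + ODD of the combined vector.  */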
void
ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
{
  struct expand_vec_perm_d d;
  machine_mode vmode = GET_MODE (target);
  unsigned int i, nelt = GET_MODE_NUNITS (vmode);
  bool ok;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = vmode;
  d.nelt = nelt;
  d.one_operand_p = false;
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}

#include "gt-ia64.h"