1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Contributed by James E. Wilson <wilson@cygnus.com> and
4 David Mosberger <davidm@hpl.hp.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stringpool.h"
29 #include "stor-layout.h"
30 #include "calls.h"
31 #include "varasm.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "except.h"
43 #include "hashtab.h"
44 #include "hash-set.h"
45 #include "vec.h"
46 #include "machmode.h"
47 #include "input.h"
48 #include "function.h"
49 #include "ggc.h"
50 #include "predict.h"
51 #include "dominance.h"
52 #include "cfg.h"
53 #include "cfgrtl.h"
54 #include "cfganal.h"
55 #include "lcm.h"
56 #include "cfgbuild.h"
57 #include "cfgcleanup.h"
58 #include "basic-block.h"
59 #include "libfuncs.h"
60 #include "diagnostic-core.h"
61 #include "sched-int.h"
62 #include "timevar.h"
63 #include "target.h"
64 #include "target-def.h"
65 #include "common/common-target.h"
66 #include "tm_p.h"
67 #include "hash-table.h"
68 #include "langhooks.h"
69 #include "tree-ssa-alias.h"
70 #include "internal-fn.h"
71 #include "gimple-fold.h"
72 #include "tree-eh.h"
73 #include "gimple-expr.h"
74 #include "is-a.h"
75 #include "gimple.h"
76 #include "gimplify.h"
77 #include "intl.h"
78 #include "df.h"
79 #include "debug.h"
80 #include "params.h"
81 #include "dbgcnt.h"
82 #include "tm-constrs.h"
83 #include "sel-sched.h"
84 #include "reload.h"
85 #include "opts.h"
86 #include "dumpfile.h"
87 #include "builtins.h"
89 /* This is used for communication between ASM_OUTPUT_LABEL and
90 ASM_OUTPUT_LABELREF. */
91 int ia64_asm_output_label = 0;
93 /* Register names for ia64_expand_prologue. */
94 static const char * const ia64_reg_numbers[96] =
95 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
96 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
97 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
98 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
99 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
100 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
101 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
102 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
103 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
104 "r104","r105","r106","r107","r108","r109","r110","r111",
105 "r112","r113","r114","r115","r116","r117","r118","r119",
106 "r120","r121","r122","r123","r124","r125","r126","r127"};
108 /* ??? These strings could be shared with REGISTER_NAMES. */
109 static const char * const ia64_input_reg_names[8] =
110 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
112 /* ??? These strings could be shared with REGISTER_NAMES. */
113 static const char * const ia64_local_reg_names[80] =
114 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
115 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
116 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
117 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
118 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
119 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
120 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
121 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
122 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
123 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
125 /* ??? These strings could be shared with REGISTER_NAMES. */
126 static const char * const ia64_output_reg_names[8] =
127 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
129 /* Variables which are this size or smaller are put in the sdata/sbss
130 sections. */
132 unsigned int ia64_section_threshold;
134 /* The following variable is used by the DFA insn scheduler. The value is
135 TRUE if we do insn bundling instead of insn scheduling. */
136 int bundling_p = 0;
138 enum ia64_frame_regs
140 reg_fp,
141 reg_save_b0,
142 reg_save_pr,
143 reg_save_ar_pfs,
144 reg_save_ar_unat,
145 reg_save_ar_lc,
146 reg_save_gp,
147 number_of_ia64_frame_regs
150 /* Structure to be filled in by ia64_compute_frame_size with register
151 save masks and offsets for the current function. */
153 struct ia64_frame_info
155 HOST_WIDE_INT total_size; /* size of the stack frame, not including
156 the caller's scratch area. */
157 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
158 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
159 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
160 HARD_REG_SET mask; /* mask of saved registers. */
161 unsigned int gr_used_mask; /* mask of registers in use as gr spill
162 registers or long-term scratches. */
163 int n_spilled; /* number of spilled registers. */
164 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
165 int n_input_regs; /* number of input registers used. */
166 int n_local_regs; /* number of local registers used. */
167 int n_output_regs; /* number of output registers used. */
168 int n_rotate_regs; /* number of rotating registers used. */
170 char need_regstk; /* true if a .regstk directive needed. */
171 char initialized; /* true if the data is finalized. */
174 /* Current frame information calculated by ia64_compute_frame_size. */
175 static struct ia64_frame_info current_frame_info;
176 /* The actual registers that are emitted. */
177 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
179 static int ia64_first_cycle_multipass_dfa_lookahead (void);
180 static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
181 static void ia64_init_dfa_pre_cycle_insn (void);
182 static rtx ia64_dfa_pre_cycle_insn (void);
183 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
184 static int ia64_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *);
185 static void ia64_h_i_d_extended (void);
186 static void * ia64_alloc_sched_context (void);
187 static void ia64_init_sched_context (void *, bool);
188 static void ia64_set_sched_context (void *);
189 static void ia64_clear_sched_context (void *);
190 static void ia64_free_sched_context (void *);
191 static int ia64_mode_to_int (machine_mode);
192 static void ia64_set_sched_flags (spec_info_t);
193 static ds_t ia64_get_insn_spec_ds (rtx_insn *);
194 static ds_t ia64_get_insn_checked_ds (rtx_insn *);
195 static bool ia64_skip_rtx_p (const_rtx);
196 static int ia64_speculate_insn (rtx_insn *, ds_t, rtx *);
197 static bool ia64_needs_block_p (ds_t);
198 static rtx ia64_gen_spec_check (rtx_insn *, rtx_insn *, ds_t);
199 static int ia64_spec_check_p (rtx);
200 static int ia64_spec_check_src_p (rtx);
201 static rtx gen_tls_get_addr (void);
202 static rtx gen_thread_pointer (void);
203 static int find_gr_spill (enum ia64_frame_regs, int);
204 static int next_scratch_gr_reg (void);
205 static void mark_reg_gr_used_mask (rtx, void *);
206 static void ia64_compute_frame_size (HOST_WIDE_INT);
207 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
208 static void finish_spill_pointers (void);
209 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
210 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
211 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
212 static rtx gen_movdi_x (rtx, rtx, rtx);
213 static rtx gen_fr_spill_x (rtx, rtx, rtx);
214 static rtx gen_fr_restore_x (rtx, rtx, rtx);
216 static void ia64_option_override (void);
217 static bool ia64_can_eliminate (const int, const int);
218 static machine_mode hfa_element_mode (const_tree, bool);
219 static void ia64_setup_incoming_varargs (cumulative_args_t, machine_mode,
220 tree, int *, int);
221 static int ia64_arg_partial_bytes (cumulative_args_t, machine_mode,
222 tree, bool);
223 static rtx ia64_function_arg_1 (cumulative_args_t, machine_mode,
224 const_tree, bool, bool);
225 static rtx ia64_function_arg (cumulative_args_t, machine_mode,
226 const_tree, bool);
227 static rtx ia64_function_incoming_arg (cumulative_args_t,
228 machine_mode, const_tree, bool);
229 static void ia64_function_arg_advance (cumulative_args_t, machine_mode,
230 const_tree, bool);
231 static unsigned int ia64_function_arg_boundary (machine_mode,
232 const_tree);
233 static bool ia64_function_ok_for_sibcall (tree, tree);
234 static bool ia64_return_in_memory (const_tree, const_tree);
235 static rtx ia64_function_value (const_tree, const_tree, bool);
236 static rtx ia64_libcall_value (machine_mode, const_rtx);
237 static bool ia64_function_value_regno_p (const unsigned int);
238 static int ia64_register_move_cost (machine_mode, reg_class_t,
239 reg_class_t);
240 static int ia64_memory_move_cost (machine_mode mode, reg_class_t,
241 bool);
242 static bool ia64_rtx_costs (rtx, int, int, int, int *, bool);
243 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
244 static void fix_range (const char *);
245 static struct machine_function * ia64_init_machine_status (void);
246 static void emit_insn_group_barriers (FILE *);
247 static void emit_all_insn_group_barriers (FILE *);
248 static void final_emit_insn_group_barriers (FILE *);
249 static void emit_predicate_relation_info (void);
250 static void ia64_reorg (void);
251 static bool ia64_in_small_data_p (const_tree);
252 static void process_epilogue (FILE *, rtx, bool, bool);
254 static bool ia64_assemble_integer (rtx, unsigned int, int);
255 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
256 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
257 static void ia64_output_function_end_prologue (FILE *);
259 static void ia64_print_operand (FILE *, rtx, int);
260 static void ia64_print_operand_address (FILE *, rtx);
261 static bool ia64_print_operand_punct_valid_p (unsigned char code);
263 static int ia64_issue_rate (void);
264 static int ia64_adjust_cost_2 (rtx_insn *, int, rtx_insn *, int, dw_t);
265 static void ia64_sched_init (FILE *, int, int);
266 static void ia64_sched_init_global (FILE *, int, int);
267 static void ia64_sched_finish_global (FILE *, int);
268 static void ia64_sched_finish (FILE *, int);
269 static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
270 static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
271 static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
272 static int ia64_variable_issue (FILE *, int, rtx_insn *, int);
274 static void ia64_asm_unwind_emit (FILE *, rtx_insn *);
275 static void ia64_asm_emit_except_personality (rtx);
276 static void ia64_asm_init_sections (void);
278 static enum unwind_info_type ia64_debug_unwind_info (void);
280 static struct bundle_state *get_free_bundle_state (void);
281 static void free_bundle_state (struct bundle_state *);
282 static void initiate_bundle_states (void);
283 static void finish_bundle_states (void);
284 static int insert_bundle_state (struct bundle_state *);
285 static void initiate_bundle_state_table (void);
286 static void finish_bundle_state_table (void);
287 static int try_issue_nops (struct bundle_state *, int);
288 static int try_issue_insn (struct bundle_state *, rtx);
289 static void issue_nops_and_insn (struct bundle_state *, int, rtx_insn *,
290 int, int);
291 static int get_max_pos (state_t);
292 static int get_template (state_t, int);
294 static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
295 static bool important_for_bundling_p (rtx_insn *);
296 static bool unknown_for_bundling_p (rtx_insn *);
297 static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
299 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
300 HOST_WIDE_INT, tree);
301 static void ia64_file_start (void);
302 static void ia64_globalize_decl_name (FILE *, tree);
304 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
305 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
306 static section *ia64_select_rtx_section (machine_mode, rtx,
307 unsigned HOST_WIDE_INT);
308 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
309 ATTRIBUTE_UNUSED;
310 static unsigned int ia64_section_type_flags (tree, const char *, int);
311 static void ia64_init_libfuncs (void)
312 ATTRIBUTE_UNUSED;
313 static void ia64_hpux_init_libfuncs (void)
314 ATTRIBUTE_UNUSED;
315 static void ia64_sysv4_init_libfuncs (void)
316 ATTRIBUTE_UNUSED;
317 static void ia64_vms_init_libfuncs (void)
318 ATTRIBUTE_UNUSED;
319 static void ia64_soft_fp_init_libfuncs (void)
320 ATTRIBUTE_UNUSED;
321 static bool ia64_vms_valid_pointer_mode (machine_mode mode)
322 ATTRIBUTE_UNUSED;
323 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
324 ATTRIBUTE_UNUSED;
326 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
327 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
328 static void ia64_encode_section_info (tree, rtx, int);
329 static rtx ia64_struct_value_rtx (tree, int);
330 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
331 static bool ia64_scalar_mode_supported_p (machine_mode mode);
332 static bool ia64_vector_mode_supported_p (machine_mode mode);
333 static bool ia64_libgcc_floating_mode_supported_p (machine_mode mode);
334 static bool ia64_legitimate_constant_p (machine_mode, rtx);
335 static bool ia64_legitimate_address_p (machine_mode, rtx, bool);
336 static bool ia64_cannot_force_const_mem (machine_mode, rtx);
337 static const char *ia64_mangle_type (const_tree);
338 static const char *ia64_invalid_conversion (const_tree, const_tree);
339 static const char *ia64_invalid_unary_op (int, const_tree);
340 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
341 static machine_mode ia64_c_mode_for_suffix (char);
342 static void ia64_trampoline_init (rtx, tree, rtx);
343 static void ia64_override_options_after_change (void);
344 static bool ia64_member_type_forces_blk (const_tree, machine_mode);
346 static tree ia64_builtin_decl (unsigned, bool);
348 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
349 static machine_mode ia64_get_reg_raw_mode (int regno);
350 static section * ia64_hpux_function_section (tree, enum node_frequency,
351 bool, bool);
353 static bool ia64_vectorize_vec_perm_const_ok (machine_mode vmode,
354 const unsigned char *sel);
356 #define MAX_VECT_LEN 8
358 struct expand_vec_perm_d
360 rtx target, op0, op1;
361 unsigned char perm[MAX_VECT_LEN];
362 machine_mode vmode;
363 unsigned char nelt;
364 bool one_operand_p;
365 bool testing_p;
368 static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
371 /* Table of valid machine attributes. */
372 static const struct attribute_spec ia64_attribute_table[] =
374 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
375 affects_type_identity } */
376 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
377 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
378 false },
379 #if TARGET_ABI_OPEN_VMS
380 { "common_object", 1, 1, true, false, false,
381 ia64_vms_common_object_attribute, false },
382 #endif
383 { "version_id", 1, 1, true, false, false,
384 ia64_handle_version_id_attribute, false },
385 { NULL, 0, 0, false, false, false, NULL, false }
388 /* Initialize the GCC target structure. */
389 #undef TARGET_ATTRIBUTE_TABLE
390 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
392 #undef TARGET_INIT_BUILTINS
393 #define TARGET_INIT_BUILTINS ia64_init_builtins
395 #undef TARGET_EXPAND_BUILTIN
396 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
398 #undef TARGET_BUILTIN_DECL
399 #define TARGET_BUILTIN_DECL ia64_builtin_decl
401 #undef TARGET_ASM_BYTE_OP
402 #define TARGET_ASM_BYTE_OP "\tdata1\t"
403 #undef TARGET_ASM_ALIGNED_HI_OP
404 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
405 #undef TARGET_ASM_ALIGNED_SI_OP
406 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
407 #undef TARGET_ASM_ALIGNED_DI_OP
408 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
409 #undef TARGET_ASM_UNALIGNED_HI_OP
410 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
411 #undef TARGET_ASM_UNALIGNED_SI_OP
412 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
413 #undef TARGET_ASM_UNALIGNED_DI_OP
414 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
415 #undef TARGET_ASM_INTEGER
416 #define TARGET_ASM_INTEGER ia64_assemble_integer
418 #undef TARGET_OPTION_OVERRIDE
419 #define TARGET_OPTION_OVERRIDE ia64_option_override
421 #undef TARGET_ASM_FUNCTION_PROLOGUE
422 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
423 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
424 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
425 #undef TARGET_ASM_FUNCTION_EPILOGUE
426 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
428 #undef TARGET_PRINT_OPERAND
429 #define TARGET_PRINT_OPERAND ia64_print_operand
430 #undef TARGET_PRINT_OPERAND_ADDRESS
431 #define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
432 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
433 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p
435 #undef TARGET_IN_SMALL_DATA_P
436 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
438 #undef TARGET_SCHED_ADJUST_COST_2
439 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
440 #undef TARGET_SCHED_ISSUE_RATE
441 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
442 #undef TARGET_SCHED_VARIABLE_ISSUE
443 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
444 #undef TARGET_SCHED_INIT
445 #define TARGET_SCHED_INIT ia64_sched_init
446 #undef TARGET_SCHED_FINISH
447 #define TARGET_SCHED_FINISH ia64_sched_finish
448 #undef TARGET_SCHED_INIT_GLOBAL
449 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
450 #undef TARGET_SCHED_FINISH_GLOBAL
451 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
452 #undef TARGET_SCHED_REORDER
453 #define TARGET_SCHED_REORDER ia64_sched_reorder
454 #undef TARGET_SCHED_REORDER2
455 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
457 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
458 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
463 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
464 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
465 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
466 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
468 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
469 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
470 ia64_first_cycle_multipass_dfa_lookahead_guard
472 #undef TARGET_SCHED_DFA_NEW_CYCLE
473 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
475 #undef TARGET_SCHED_H_I_D_EXTENDED
476 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
478 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
479 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
481 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
482 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
484 #undef TARGET_SCHED_SET_SCHED_CONTEXT
485 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
487 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
488 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
490 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
491 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
493 #undef TARGET_SCHED_SET_SCHED_FLAGS
494 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
496 #undef TARGET_SCHED_GET_INSN_SPEC_DS
497 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
499 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
500 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
502 #undef TARGET_SCHED_SPECULATE_INSN
503 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
505 #undef TARGET_SCHED_NEEDS_BLOCK_P
506 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
508 #undef TARGET_SCHED_GEN_SPEC_CHECK
509 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
511 #undef TARGET_SCHED_SKIP_RTX_P
512 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
514 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
515 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
516 #undef TARGET_ARG_PARTIAL_BYTES
517 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
518 #undef TARGET_FUNCTION_ARG
519 #define TARGET_FUNCTION_ARG ia64_function_arg
520 #undef TARGET_FUNCTION_INCOMING_ARG
521 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
522 #undef TARGET_FUNCTION_ARG_ADVANCE
523 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
524 #undef TARGET_FUNCTION_ARG_BOUNDARY
525 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
527 #undef TARGET_ASM_OUTPUT_MI_THUNK
528 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
529 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
530 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
532 #undef TARGET_ASM_FILE_START
533 #define TARGET_ASM_FILE_START ia64_file_start
535 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
536 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
538 #undef TARGET_REGISTER_MOVE_COST
539 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
540 #undef TARGET_MEMORY_MOVE_COST
541 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
542 #undef TARGET_RTX_COSTS
543 #define TARGET_RTX_COSTS ia64_rtx_costs
544 #undef TARGET_ADDRESS_COST
545 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
547 #undef TARGET_UNSPEC_MAY_TRAP_P
548 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
550 #undef TARGET_MACHINE_DEPENDENT_REORG
551 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
553 #undef TARGET_ENCODE_SECTION_INFO
554 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
556 #undef TARGET_SECTION_TYPE_FLAGS
557 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
559 #ifdef HAVE_AS_TLS
560 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
561 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
562 #endif
564 /* ??? Investigate. */
565 #if 0
566 #undef TARGET_PROMOTE_PROTOTYPES
567 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
568 #endif
570 #undef TARGET_FUNCTION_VALUE
571 #define TARGET_FUNCTION_VALUE ia64_function_value
572 #undef TARGET_LIBCALL_VALUE
573 #define TARGET_LIBCALL_VALUE ia64_libcall_value
574 #undef TARGET_FUNCTION_VALUE_REGNO_P
575 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
577 #undef TARGET_STRUCT_VALUE_RTX
578 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
579 #undef TARGET_RETURN_IN_MEMORY
580 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
581 #undef TARGET_SETUP_INCOMING_VARARGS
582 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
583 #undef TARGET_STRICT_ARGUMENT_NAMING
584 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
585 #undef TARGET_MUST_PASS_IN_STACK
586 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
587 #undef TARGET_GET_RAW_RESULT_MODE
588 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
589 #undef TARGET_GET_RAW_ARG_MODE
590 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
592 #undef TARGET_MEMBER_TYPE_FORCES_BLK
593 #define TARGET_MEMBER_TYPE_FORCES_BLK ia64_member_type_forces_blk
595 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
596 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
598 #undef TARGET_ASM_UNWIND_EMIT
599 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
600 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
601 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
602 #undef TARGET_ASM_INIT_SECTIONS
603 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
605 #undef TARGET_DEBUG_UNWIND_INFO
606 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
608 #undef TARGET_SCALAR_MODE_SUPPORTED_P
609 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
610 #undef TARGET_VECTOR_MODE_SUPPORTED_P
611 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
613 #undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
614 #define TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P \
615 ia64_libgcc_floating_mode_supported_p
617 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
618 in an order different from the specified program order. */
619 #undef TARGET_RELAXED_ORDERING
620 #define TARGET_RELAXED_ORDERING true
622 #undef TARGET_LEGITIMATE_CONSTANT_P
623 #define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
624 #undef TARGET_LEGITIMATE_ADDRESS_P
625 #define TARGET_LEGITIMATE_ADDRESS_P ia64_legitimate_address_p
627 #undef TARGET_CANNOT_FORCE_CONST_MEM
628 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
630 #undef TARGET_MANGLE_TYPE
631 #define TARGET_MANGLE_TYPE ia64_mangle_type
633 #undef TARGET_INVALID_CONVERSION
634 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
635 #undef TARGET_INVALID_UNARY_OP
636 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
637 #undef TARGET_INVALID_BINARY_OP
638 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
640 #undef TARGET_C_MODE_FOR_SUFFIX
641 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
643 #undef TARGET_CAN_ELIMINATE
644 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
646 #undef TARGET_TRAMPOLINE_INIT
647 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
649 #undef TARGET_CAN_USE_DOLOOP_P
650 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
651 #undef TARGET_INVALID_WITHIN_DOLOOP
652 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
654 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
655 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
657 #undef TARGET_PREFERRED_RELOAD_CLASS
658 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
660 #undef TARGET_DELAY_SCHED2
661 #define TARGET_DELAY_SCHED2 true
663 /* Variable tracking should be run after all optimizations which
664 change order of insns. It also needs a valid CFG. */
665 #undef TARGET_DELAY_VARTRACK
666 #define TARGET_DELAY_VARTRACK true
668 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
669 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
671 struct gcc_target targetm = TARGET_INITIALIZER;
673 typedef enum
675 ADDR_AREA_NORMAL, /* normal address area */
676 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
678 ia64_addr_area;
680 static GTY(()) tree small_ident1;
681 static GTY(()) tree small_ident2;
683 static void
684 init_idents (void)
686 if (small_ident1 == 0)
688 small_ident1 = get_identifier ("small");
689 small_ident2 = get_identifier ("__small__");
693 /* Retrieve the address area that has been chosen for the given decl. */
695 static ia64_addr_area
696 ia64_get_addr_area (tree decl)
698 tree model_attr;
700 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
701 if (model_attr)
703 tree id;
705 init_idents ();
706 id = TREE_VALUE (TREE_VALUE (model_attr));
707 if (id == small_ident1 || id == small_ident2)
708 return ADDR_AREA_SMALL;
710 return ADDR_AREA_NORMAL;
713 static tree
714 ia64_handle_model_attribute (tree *node, tree name, tree args,
715 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
717 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
718 ia64_addr_area area;
719 tree arg, decl = *node;
721 init_idents ();
722 arg = TREE_VALUE (args);
723 if (arg == small_ident1 || arg == small_ident2)
725 addr_area = ADDR_AREA_SMALL;
727 else
729 warning (OPT_Wattributes, "invalid argument of %qE attribute",
730 name);
731 *no_add_attrs = true;
734 switch (TREE_CODE (decl))
736 case VAR_DECL:
737 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
738 == FUNCTION_DECL)
739 && !TREE_STATIC (decl))
741 error_at (DECL_SOURCE_LOCATION (decl),
742 "an address area attribute cannot be specified for "
743 "local variables");
744 *no_add_attrs = true;
746 area = ia64_get_addr_area (decl);
747 if (area != ADDR_AREA_NORMAL && addr_area != area)
749 error ("address area of %q+D conflicts with previous "
750 "declaration", decl);
751 *no_add_attrs = true;
753 break;
755 case FUNCTION_DECL:
756 error_at (DECL_SOURCE_LOCATION (decl),
757 "address area attribute cannot be specified for "
758 "functions");
759 *no_add_attrs = true;
760 break;
762 default:
763 warning (OPT_Wattributes, "%qE attribute ignored",
764 name);
765 *no_add_attrs = true;
766 break;
769 return NULL_TREE;
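/* Illustrative usage of the attribute handled above (added comment, not part
   of the original source): a file-scope object can be placed in the small
   address area with something like
       static int counter __attribute__ ((model (small)));
   making it addressable via a single "addl" as described for ADDR_AREA_SMALL.  */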
772 /* Part of the low level implementation of DEC Ada pragma Common_Object which
773 enables the shared use of variables stored in overlaid linker areas
774 corresponding to the use of Fortran COMMON. */
776 static tree
777 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
778 int flags ATTRIBUTE_UNUSED,
779 bool *no_add_attrs)
781 tree decl = *node;
782 tree id;
784 gcc_assert (DECL_P (decl));
786 DECL_COMMON (decl) = 1;
787 id = TREE_VALUE (args);
788 if (TREE_CODE (id) != IDENTIFIER_NODE && TREE_CODE (id) != STRING_CST)
790 error ("%qE attribute requires a string constant argument", name);
791 *no_add_attrs = true;
792 return NULL_TREE;
794 return NULL_TREE;
797 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
799 void
800 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
801 unsigned HOST_WIDE_INT size,
802 unsigned int align)
804 tree attr = DECL_ATTRIBUTES (decl);
806 if (attr)
807 attr = lookup_attribute ("common_object", attr);
808 if (attr)
810 tree id = TREE_VALUE (TREE_VALUE (attr));
811 const char *name;
813 if (TREE_CODE (id) == IDENTIFIER_NODE)
814 name = IDENTIFIER_POINTER (id);
815 else if (TREE_CODE (id) == STRING_CST)
816 name = TREE_STRING_POINTER (id);
817 else
818 abort ();
820 fprintf (file, "\t.vms_common\t\"%s\",", name);
822 else
823 fprintf (file, "%s", COMMON_ASM_OP);
825 /* Code from elfos.h. */
826 assemble_name (file, name);
827 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u",
828 size, align / BITS_PER_UNIT);
830 fputc ('\n', file);
833 static void
834 ia64_encode_addr_area (tree decl, rtx symbol)
836 int flags;
838 flags = SYMBOL_REF_FLAGS (symbol);
839 switch (ia64_get_addr_area (decl))
841 case ADDR_AREA_NORMAL: break;
842 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
843 default: gcc_unreachable ();
845 SYMBOL_REF_FLAGS (symbol) = flags;
848 static void
849 ia64_encode_section_info (tree decl, rtx rtl, int first)
851 default_encode_section_info (decl, rtl, first);
853 /* Careful not to prod global register variables. */
854 if (TREE_CODE (decl) == VAR_DECL
855 && GET_CODE (DECL_RTL (decl)) == MEM
856 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
857 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
858 ia64_encode_addr_area (decl, XEXP (rtl, 0));
861 /* Return 1 if the operands of a move are ok. */
864 ia64_move_ok (rtx dst, rtx src)
866 /* If we're under init_recog_no_volatile, we'll not be able to use
867 memory_operand. So check the code directly and don't worry about
868 the validity of the underlying address, which should have been
869 checked elsewhere anyway. */
870 if (GET_CODE (dst) != MEM)
871 return 1;
872 if (GET_CODE (src) == MEM)
873 return 0;
874 if (register_operand (src, VOIDmode))
875 return 1;
877 /* Otherwise, this must be a constant: either 0, 0.0 or 1.0. */
878 if (INTEGRAL_MODE_P (GET_MODE (dst)))
879 return src == const0_rtx;
880 else
881 return satisfies_constraint_G (src);
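/* Illustrative example of the rule above (added comment): a register-to-memory
   move is OK, a direct memory-to-memory move is not, and a constant may be
   stored to memory only if it is integer zero (or, for FP modes, a value
   satisfying constraint G); anything else must go through a register first.  */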
884 /* Return 1 if the operands are ok for a floating point load pair. */
887 ia64_load_pair_ok (rtx dst, rtx src)
889 /* ??? There is a thinko in the implementation of the "x" constraint and the
890 FP_REGS class. The constraint will also reject (reg f30:TI) so we must
891 also return false for it. */
892 if (GET_CODE (dst) != REG
893 || !(FP_REGNO_P (REGNO (dst)) && FP_REGNO_P (REGNO (dst) + 1)))
894 return 0;
895 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
896 return 0;
897 switch (GET_CODE (XEXP (src, 0)))
899 case REG:
900 case POST_INC:
901 break;
902 case POST_DEC:
903 return 0;
904 case POST_MODIFY:
906 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
908 if (GET_CODE (adjust) != CONST_INT
909 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
910 return 0;
912 break;
913 default:
914 abort ();
916 return 1;
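/* Illustrative note (added comment): for a POST_MODIFY address the adjustment
   must equal the size of the accessed mode, e.g. for a 16-byte mode only
   (post_modify (reg) (plus (reg) (const_int 16))) is accepted here.  */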
920 addp4_optimize_ok (rtx op1, rtx op2)
922 return (basereg_operand (op1, GET_MODE(op1)) !=
923 basereg_operand (op2, GET_MODE(op2)));
926 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
927 Return the length of the field, or <= 0 on failure. */
930 ia64_depz_field_mask (rtx rop, rtx rshift)
932 unsigned HOST_WIDE_INT op = INTVAL (rop);
933 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
935 /* Get rid of the zero bits we're shifting in. */
936 op >>= shift;
938 /* We must now have a solid block of 1's at bit 0. */
939 return exact_log2 (op + 1);
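/* Worked example (added comment): rop = 0x7f8, rshift = 3.  After the shift
   op = 0xff, and exact_log2 (0xff + 1) = 8, so the mask describes an 8-bit
   field.  A non-contiguous mask such as 0x5 gives exact_log2 (6) = -1,
   signalling failure.  */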
942 /* Return the TLS model to use for ADDR. */
944 static enum tls_model
945 tls_symbolic_operand_type (rtx addr)
947 enum tls_model tls_kind = TLS_MODEL_NONE;
949 if (GET_CODE (addr) == CONST)
951 if (GET_CODE (XEXP (addr, 0)) == PLUS
952 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
953 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
955 else if (GET_CODE (addr) == SYMBOL_REF)
956 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
958 return tls_kind;
961 /* Returns true if REG (assumed to be a `reg' RTX) is valid for use
962 as a base register. */
964 static inline bool
965 ia64_reg_ok_for_base_p (const_rtx reg, bool strict)
967 if (strict
968 && REGNO_OK_FOR_BASE_P (REGNO (reg)))
969 return true;
970 else if (!strict
971 && (GENERAL_REGNO_P (REGNO (reg))
972 || !HARD_REGISTER_P (reg)))
973 return true;
974 else
975 return false;
978 static bool
979 ia64_legitimate_address_reg (const_rtx reg, bool strict)
981 if ((REG_P (reg) && ia64_reg_ok_for_base_p (reg, strict))
982 || (GET_CODE (reg) == SUBREG && REG_P (XEXP (reg, 0))
983 && ia64_reg_ok_for_base_p (XEXP (reg, 0), strict)))
984 return true;
986 return false;
989 static bool
990 ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
992 if (GET_CODE (disp) == PLUS
993 && rtx_equal_p (reg, XEXP (disp, 0))
994 && (ia64_legitimate_address_reg (XEXP (disp, 1), strict)
995 || (CONST_INT_P (XEXP (disp, 1))
996 && IN_RANGE (INTVAL (XEXP (disp, 1)), -256, 255))))
997 return true;
999 return false;
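/* Illustrative example of the check above (added comment): a POST_MODIFY such
   as (post_modify (reg r2) (plus (reg r2) (const_int -16))) is accepted because
   the constant lies in the signed 9-bit range [-256, 255]; a register addend is
   also allowed when it is itself a valid base register.  */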
1002 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
1004 static bool
1005 ia64_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
1006 rtx x, bool strict)
1008 if (ia64_legitimate_address_reg (x, strict))
1009 return true;
1010 else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
1011 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1012 && XEXP (x, 0) != arg_pointer_rtx)
1013 return true;
1014 else if (GET_CODE (x) == POST_MODIFY
1015 && ia64_legitimate_address_reg (XEXP (x, 0), strict)
1016 && XEXP (x, 0) != arg_pointer_rtx
1017 && ia64_legitimate_address_disp (XEXP (x, 0), XEXP (x, 1), strict))
1018 return true;
1019 else
1020 return false;
1023 /* Return true if X is a constant that is valid for some immediate
1024 field in an instruction. */
1026 static bool
1027 ia64_legitimate_constant_p (machine_mode mode, rtx x)
1029 switch (GET_CODE (x))
1031 case CONST_INT:
1032 case LABEL_REF:
1033 return true;
1035 case CONST_DOUBLE:
1036 if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
1037 return true;
1038 return satisfies_constraint_G (x);
1040 case CONST:
1041 case SYMBOL_REF:
1042 /* ??? Short term workaround for PR 28490. We must make the code here
1043 match the code in ia64_expand_move and move_operand, even though they
1044 are both technically wrong. */
1045 if (tls_symbolic_operand_type (x) == 0)
1047 HOST_WIDE_INT addend = 0;
1048 rtx op = x;
1050 if (GET_CODE (op) == CONST
1051 && GET_CODE (XEXP (op, 0)) == PLUS
1052 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1054 addend = INTVAL (XEXP (XEXP (op, 0), 1));
1055 op = XEXP (XEXP (op, 0), 0);
1058 if (any_offset_symbol_operand (op, mode)
1059 || function_operand (op, mode))
1060 return true;
1061 if (aligned_offset_symbol_operand (op, mode))
1062 return (addend & 0x3fff) == 0;
1063 return false;
1065 return false;
1067 case CONST_VECTOR:
1068 if (mode == V2SFmode)
1069 return satisfies_constraint_Y (x);
1071 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1072 && GET_MODE_SIZE (mode) <= 8);
1074 default:
1075 return false;
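/* Illustrative note on the CONST/SYMBOL_REF case above (added comment): for a
   symbol that is only aligned-offset legal, the addend must be a multiple of
   0x4000 (16KB), so sym+0x8000 is accepted while sym+0x8004 is rejected by
   the (addend & 0x3fff) == 0 test.  */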
1079 /* Don't allow TLS addresses to get spilled to memory. */
1081 static bool
1082 ia64_cannot_force_const_mem (machine_mode mode, rtx x)
1084 if (mode == RFmode)
1085 return true;
1086 return tls_symbolic_operand_type (x) != 0;
1089 /* Expand a symbolic constant load. */
1091 bool
1092 ia64_expand_load_address (rtx dest, rtx src)
1094 gcc_assert (GET_CODE (dest) == REG);
1096 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1097 having to pointer-extend the value afterward. Other forms of address
1098 computation below are also more natural to compute as 64-bit quantities.
1099 If we've been given an SImode destination register, change it. */
1100 if (GET_MODE (dest) != Pmode)
1101 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1102 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1104 if (TARGET_NO_PIC)
1105 return false;
1106 if (small_addr_symbolic_operand (src, VOIDmode))
1107 return false;
1109 if (TARGET_AUTO_PIC)
1110 emit_insn (gen_load_gprel64 (dest, src));
1111 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1112 emit_insn (gen_load_fptr (dest, src));
1113 else if (sdata_symbolic_operand (src, VOIDmode))
1114 emit_insn (gen_load_gprel (dest, src));
1115 else
1117 HOST_WIDE_INT addend = 0;
1118 rtx tmp;
1120 /* We did split constant offsets in ia64_expand_move, and we did try
1121 to keep them split in move_operand, but we also allowed reload to
1122 rematerialize arbitrary constants rather than spill the value to
1123 the stack and reload it. So we have to be prepared here to split
1124 them apart again. */
1125 if (GET_CODE (src) == CONST)
1127 HOST_WIDE_INT hi, lo;
1129 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1130 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1131 hi = hi - lo;
1133 if (lo != 0)
1135 addend = lo;
1136 src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
1140 tmp = gen_rtx_HIGH (Pmode, src);
1141 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1142 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1144 tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
1145 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1147 if (addend)
1149 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1150 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1154 return true;
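/* Worked example of the offset split above (added comment): for a CONST offset
   of 0x6000, lo = ((0x6000 & 0x3fff) ^ 0x2000) - 0x2000 = -0x2000 and
   hi = 0x6000 - lo = 0x8000, so the address is formed as (sym + 0x8000) plus a
   final addend of -0x2000, keeping the low part within the signed 14-bit
   range of the trailing add.  */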
1157 static GTY(()) rtx gen_tls_tga;
1158 static rtx
1159 gen_tls_get_addr (void)
1161 if (!gen_tls_tga)
1162 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1163 return gen_tls_tga;
1166 static GTY(()) rtx thread_pointer_rtx;
1167 static rtx
1168 gen_thread_pointer (void)
1170 if (!thread_pointer_rtx)
1171 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1172 return thread_pointer_rtx;
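/* Note (added comment): hard register 13 used above is r13, the thread pointer
   in the IA-64 software conventions, so gen_thread_pointer simply materialises
   the ABI-defined TLS base register.  */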
1175 static rtx
1176 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1177 rtx orig_op1, HOST_WIDE_INT addend)
1179 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp;
1180 rtx_insn *insns;
1181 rtx orig_op0 = op0;
1182 HOST_WIDE_INT addend_lo, addend_hi;
1184 switch (tls_kind)
1186 case TLS_MODEL_GLOBAL_DYNAMIC:
1187 start_sequence ();
1189 tga_op1 = gen_reg_rtx (Pmode);
1190 emit_insn (gen_load_dtpmod (tga_op1, op1));
1192 tga_op2 = gen_reg_rtx (Pmode);
1193 emit_insn (gen_load_dtprel (tga_op2, op1));
1195 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1196 LCT_CONST, Pmode, 2, tga_op1,
1197 Pmode, tga_op2, Pmode);
1199 insns = get_insns ();
1200 end_sequence ();
1202 if (GET_MODE (op0) != Pmode)
1203 op0 = tga_ret;
1204 emit_libcall_block (insns, op0, tga_ret, op1);
1205 break;
1207 case TLS_MODEL_LOCAL_DYNAMIC:
1208 /* ??? This isn't the completely proper way to do local-dynamic.
1209 If the call to __tls_get_addr is used only by a single symbol,
1210 then we should (somehow) move the dtprel to the second arg
1211 to avoid the extra add. */
1212 start_sequence ();
1214 tga_op1 = gen_reg_rtx (Pmode);
1215 emit_insn (gen_load_dtpmod (tga_op1, op1));
1217 tga_op2 = const0_rtx;
1219 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1220 LCT_CONST, Pmode, 2, tga_op1,
1221 Pmode, tga_op2, Pmode);
1223 insns = get_insns ();
1224 end_sequence ();
1226 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1227 UNSPEC_LD_BASE);
1228 tmp = gen_reg_rtx (Pmode);
1229 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1231 if (!register_operand (op0, Pmode))
1232 op0 = gen_reg_rtx (Pmode);
1233 if (TARGET_TLS64)
1235 emit_insn (gen_load_dtprel (op0, op1));
1236 emit_insn (gen_adddi3 (op0, tmp, op0));
1238 else
1239 emit_insn (gen_add_dtprel (op0, op1, tmp));
1240 break;
1242 case TLS_MODEL_INITIAL_EXEC:
1243 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1244 addend_hi = addend - addend_lo;
1246 op1 = plus_constant (Pmode, op1, addend_hi);
1247 addend = addend_lo;
1249 tmp = gen_reg_rtx (Pmode);
1250 emit_insn (gen_load_tprel (tmp, op1));
1252 if (!register_operand (op0, Pmode))
1253 op0 = gen_reg_rtx (Pmode);
1254 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1255 break;
1257 case TLS_MODEL_LOCAL_EXEC:
1258 if (!register_operand (op0, Pmode))
1259 op0 = gen_reg_rtx (Pmode);
1261 op1 = orig_op1;
1262 addend = 0;
1263 if (TARGET_TLS64)
1265 emit_insn (gen_load_tprel (op0, op1));
1266 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1268 else
1269 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1270 break;
1272 default:
1273 gcc_unreachable ();
1276 if (addend)
1277 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1278 orig_op0, 1, OPTAB_DIRECT);
1279 if (orig_op0 == op0)
1280 return NULL_RTX;
1281 if (GET_MODE (orig_op0) == Pmode)
1282 return op0;
1283 return gen_lowpart (GET_MODE (orig_op0), op0);
1287 ia64_expand_move (rtx op0, rtx op1)
1289 machine_mode mode = GET_MODE (op0);
1291 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1292 op1 = force_reg (mode, op1);
1294 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1296 HOST_WIDE_INT addend = 0;
1297 enum tls_model tls_kind;
1298 rtx sym = op1;
1300 if (GET_CODE (op1) == CONST
1301 && GET_CODE (XEXP (op1, 0)) == PLUS
1302 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1304 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1305 sym = XEXP (XEXP (op1, 0), 0);
1308 tls_kind = tls_symbolic_operand_type (sym);
1309 if (tls_kind)
1310 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1312 if (any_offset_symbol_operand (sym, mode))
1313 addend = 0;
1314 else if (aligned_offset_symbol_operand (sym, mode))
1316 HOST_WIDE_INT addend_lo, addend_hi;
1318 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1319 addend_hi = addend - addend_lo;
1321 if (addend_lo != 0)
1323 op1 = plus_constant (mode, sym, addend_hi);
1324 addend = addend_lo;
1326 else
1327 addend = 0;
1329 else
1330 op1 = sym;
1332 if (reload_completed)
1334 /* We really should have taken care of this offset earlier. */
1335 gcc_assert (addend == 0);
1336 if (ia64_expand_load_address (op0, op1))
1337 return NULL_RTX;
1340 if (addend)
1342 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1344 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1346 op1 = expand_simple_binop (mode, PLUS, subtarget,
1347 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1348 if (op0 == op1)
1349 return NULL_RTX;
1353 return op1;
1356 /* Split a move from OP1 to OP0 conditional on COND. */
1358 void
1359 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1361 rtx_insn *insn, *first = get_last_insn ();
1363 emit_move_insn (op0, op1);
1365 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1366 if (INSN_P (insn))
1367 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1368 PATTERN (insn));
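/* Illustrative effect of the loop above (added comment): an emitted
   (set (reg r14) (reg r15)) becomes
   (cond_exec (ne (reg p6) (const_int 0)) (set (reg r14) (reg r15)))
   when COND is (ne (reg p6) (const_int 0)).  */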
1371 /* Split a post-reload TImode or TFmode reference into two DImode
1372 components. This is made extra difficult by the fact that we do
1373 not get any scratch registers to work with, because reload cannot
1374 be prevented from giving us a scratch that overlaps the register
1375 pair involved. So instead, when addressing memory, we tweak the
1376 pointer register up and back down with POST_INCs. Or up and not
1377 back down when we can get away with it.
1379 REVERSED is true when the loads must be done in reversed order
1380 (high word first) for correctness. DEAD is true when the pointer
1381 dies with the second insn we generate and therefore the second
1382 address must not carry a postmodify.
1384 May return an insn which is to be emitted after the moves. */
1386 static rtx
1387 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1389 rtx fixup = 0;
1391 switch (GET_CODE (in))
1393 case REG:
1394 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1395 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1396 break;
1398 case CONST_INT:
1399 case CONST_DOUBLE:
1400 /* Cannot occur reversed. */
1401 gcc_assert (!reversed);
1403 if (GET_MODE (in) != TFmode)
1404 split_double (in, &out[0], &out[1]);
1405 else
1406 /* split_double does not understand how to split a TFmode
1407 quantity into a pair of DImode constants. */
1409 REAL_VALUE_TYPE r;
1410 unsigned HOST_WIDE_INT p[2];
1411 long l[4]; /* TFmode is 128 bits */
1413 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1414 real_to_target (l, &r, TFmode);
1416 if (FLOAT_WORDS_BIG_ENDIAN)
1418 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1419 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1421 else
1423 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1424 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1426 out[0] = GEN_INT (p[0]);
1427 out[1] = GEN_INT (p[1]);
1429 break;
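/* Illustrative note (added comment): real_to_target fills l[] with the four
   32-bit words of the 128-bit TFmode image; the code above then packs them
   into two 64-bit halves, honouring FLOAT_WORDS_BIG_ENDIAN, so each half can
   be moved with an ordinary DImode instruction.  */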
1431 case MEM:
1433 rtx base = XEXP (in, 0);
1434 rtx offset;
1436 switch (GET_CODE (base))
1438 case REG:
1439 if (!reversed)
1441 out[0] = adjust_automodify_address
1442 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1443 out[1] = adjust_automodify_address
1444 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1446 else
1448 /* Reversal requires a pre-increment, which can only
1449 be done as a separate insn. */
1450 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1451 out[0] = adjust_automodify_address
1452 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1453 out[1] = adjust_address (in, DImode, 0);
1455 break;
1457 case POST_INC:
1458 gcc_assert (!reversed && !dead);
1460 /* Just do the increment in two steps. */
1461 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1462 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1463 break;
1465 case POST_DEC:
1466 gcc_assert (!reversed && !dead);
1468 /* Add 8, subtract 24. */
1469 base = XEXP (base, 0);
1470 out[0] = adjust_automodify_address
1471 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1472 out[1] = adjust_automodify_address
1473 (in, DImode,
1474 gen_rtx_POST_MODIFY (Pmode, base,
1475 plus_constant (Pmode, base, -24)),
1477 break;
1479 case POST_MODIFY:
1480 gcc_assert (!reversed && !dead);
1482 /* Extract and adjust the modification. This case is
1483 trickier than the others, because we might have an
1484 index register, or we might have a combined offset that
1485 doesn't fit a signed 9-bit displacement field. We can
1486 assume the incoming expression is already legitimate. */
1487 offset = XEXP (base, 1);
1488 base = XEXP (base, 0);
1490 out[0] = adjust_automodify_address
1491 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1493 if (GET_CODE (XEXP (offset, 1)) == REG)
1495 /* Can't adjust the postmodify to match. Emit the
1496 original, then a separate addition insn. */
1497 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1498 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1500 else
1502 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1503 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1505 /* Again the postmodify cannot be made to match,
1506 but in this case it's more efficient to get rid
1507 of the postmodify entirely and fix up with an
1508 add insn. */
1509 out[1] = adjust_automodify_address (in, DImode, base, 8);
1510 fixup = gen_adddi3
1511 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1513 else
1515 /* Combined offset still fits in the displacement field.
1516 (We cannot overflow it at the high end.) */
1517 out[1] = adjust_automodify_address
1518 (in, DImode, gen_rtx_POST_MODIFY
1519 (Pmode, base, gen_rtx_PLUS
1520 (Pmode, base,
1521 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1525 break;
1527 default:
1528 gcc_unreachable ();
1530 break;
1533 default:
1534 gcc_unreachable ();
1537 return fixup;
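/* Illustrative example of the POST_MODIFY fix-up above (added comment): an
   incoming (post_modify (reg) (plus (reg) (const_int -256))) cannot simply be
   rewritten with an adjustment of -264, which is outside the signed 9-bit
   displacement range, so the postmodify is dropped and a separate add of -264
   is returned in FIXUP instead.  */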
1540 /* Split a TImode or TFmode move instruction after reload.
1541 This is used by *movtf_internal and *movti_internal. */
1542 void
1543 ia64_split_tmode_move (rtx operands[])
1545 rtx in[2], out[2], insn;
1546 rtx fixup[2];
1547 bool dead = false;
1548 bool reversed = false;
1550 /* It is possible for reload to decide to overwrite a pointer with
1551 the value it points to. In that case we have to do the loads in
1552 the appropriate order so that the pointer is not destroyed too
1553 early. Also we must not generate a postmodify for that second
1554 load, or rws_access_regno will die. And we must not generate a
1555 postmodify for the second load if the destination register
1556 overlaps with the base register. */
1557 if (GET_CODE (operands[1]) == MEM
1558 && reg_overlap_mentioned_p (operands[0], operands[1]))
1560 rtx base = XEXP (operands[1], 0);
1561 while (GET_CODE (base) != REG)
1562 base = XEXP (base, 0);
1564 if (REGNO (base) == REGNO (operands[0]))
1565 reversed = true;
1567 if (refers_to_regno_p (REGNO (operands[0]),
1568 REGNO (operands[0])+2,
1569 base, 0))
1570 dead = true;
1572 /* Another reason to do the moves in reversed order is if the first
1573 element of the target register pair is also the second element of
1574 the source register pair. */
1575 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1576 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1577 reversed = true;
1579 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1580 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1582 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1583 if (GET_CODE (EXP) == MEM \
1584 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1585 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1586 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1587 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1589 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1590 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1591 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1593 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1594 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1595 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1597 if (fixup[0])
1598 emit_insn (fixup[0]);
1599 if (fixup[1])
1600 emit_insn (fixup[1]);
1602 #undef MAYBE_ADD_REG_INC_NOTE
1605 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1606 through memory plus an extra GR scratch register. Except that you can
1607 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1608 SECONDARY_RELOAD_CLASS, but not both.
1610 We got into problems in the first place by allowing a construct like
1611 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1612 This solution attempts to prevent this situation from occurring. When
1613 we see something like the above, we spill the inner register to memory. */
1615 static rtx
1616 spill_xfmode_rfmode_operand (rtx in, int force, machine_mode mode)
1618 if (GET_CODE (in) == SUBREG
1619 && GET_MODE (SUBREG_REG (in)) == TImode
1620 && GET_CODE (SUBREG_REG (in)) == REG)
1622 rtx memt = assign_stack_temp (TImode, 16);
1623 emit_move_insn (memt, SUBREG_REG (in));
1624 return adjust_address (memt, mode, 0);
1626 else if (force && GET_CODE (in) == REG)
1628 rtx memx = assign_stack_temp (mode, 16);
1629 emit_move_insn (memx, in);
1630 return memx;
1632 else
1633 return in;
1636 /* Expand the movxf or movrf pattern (MODE says which) with the given
1637 OPERANDS, returning true if the pattern should then invoke
1638 DONE. */
1640 bool
1641 ia64_expand_movxf_movrf (machine_mode mode, rtx operands[])
1643 rtx op0 = operands[0];
1645 if (GET_CODE (op0) == SUBREG)
1646 op0 = SUBREG_REG (op0);
1648 /* We must support XFmode loads into general registers for stdarg/vararg,
1649 unprototyped calls, and a rare case where a long double is passed as
1650 an argument after a float HFA fills the FP registers. We split them into
1651 DImode loads for convenience. We also need to support XFmode stores
1652 for the last case. This case does not happen for stdarg/vararg routines,
1653 because we do a block store to memory of unnamed arguments. */
1655 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1657 rtx out[2];
1659 /* We're hoping to transform everything that deals with XFmode
1660 quantities and GR registers early in the compiler. */
1661 gcc_assert (can_create_pseudo_p ());
1663 /* Struct to register can just use TImode instead. */
1664 if ((GET_CODE (operands[1]) == SUBREG
1665 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1666 || (GET_CODE (operands[1]) == REG
1667 && GR_REGNO_P (REGNO (operands[1]))))
1669 rtx op1 = operands[1];
1671 if (GET_CODE (op1) == SUBREG)
1672 op1 = SUBREG_REG (op1);
1673 else
1674 op1 = gen_rtx_REG (TImode, REGNO (op1));
1676 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1677 return true;
1680 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1682 /* Don't word-swap when reading in the constant. */
1683 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1684 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1685 0, mode));
1686 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1687 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1688 0, mode));
1689 return true;
1692 /* If the quantity is in a register not known to be GR, spill it. */
1693 if (register_operand (operands[1], mode))
1694 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1696 gcc_assert (GET_CODE (operands[1]) == MEM);
1698 /* Don't word-swap when reading in the value. */
1699 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1700 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1702 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1703 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1704 return true;
1707 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1709 /* We're hoping to transform everything that deals with XFmode
1710 quantities and GR registers early in the compiler. */
1711 gcc_assert (can_create_pseudo_p ());
1713 /* Op0 can't be a GR_REG here, as that case is handled above.
1714 If op0 is a register, then we spill op1, so that we now have a
1715 MEM operand. This requires creating an XFmode subreg of a TImode reg
1716 to force the spill. */
1717 if (register_operand (operands[0], mode))
1719 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1720 op1 = gen_rtx_SUBREG (mode, op1, 0);
1721 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1724 else
1726 rtx in[2];
1728 gcc_assert (GET_CODE (operands[0]) == MEM);
1730 /* Don't word-swap when writing out the value. */
1731 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1732 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1734 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1735 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1736 return true;
1740 if (!reload_in_progress && !reload_completed)
1742 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1744 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1746 rtx memt, memx, in = operands[1];
1747 if (CONSTANT_P (in))
1748 in = validize_mem (force_const_mem (mode, in));
1749 if (GET_CODE (in) == MEM)
1750 memt = adjust_address (in, TImode, 0);
1751 else
1753 memt = assign_stack_temp (TImode, 16);
1754 memx = adjust_address (memt, mode, 0);
1755 emit_move_insn (memx, in);
1757 emit_move_insn (op0, memt);
1758 return true;
1761 if (!ia64_move_ok (operands[0], operands[1]))
1762 operands[1] = force_reg (mode, operands[1]);
1765 return false;
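/* A hedged, illustrative sketch of the kind of user-level source that
   exercises the GR path handled above: XFmode (long double) values read
   back from the unnamed-argument save area.  The function below is
   hypothetical example code, kept disabled; it is not part of the port.  */
#if 0
#include <stdarg.h>

static long double
example_sum_long_doubles (int count, ...)
{
  va_list ap;
  long double total = 0.0L;
  int i;

  va_start (ap, count);
  /* Each unnamed long double is fetched from memory and, when it lands in
     general registers, is split into two DImode loads by the expander.  */
  for (i = 0; i < count; i++)
    total += va_arg (ap, long double);
  va_end (ap);
  return total;
}
#endif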
1768 /* Emit a comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1769 with the expression that holds the compare result (in VOIDmode). */
1771 static GTY(()) rtx cmptf_libfunc;
1773 void
1774 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1776 enum rtx_code code = GET_CODE (*expr);
1777 rtx cmp;
1779 /* If we have a BImode input, then we already have a compare result, and
1780 do not need to emit another comparison. */
1781 if (GET_MODE (*op0) == BImode)
1783 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1784 cmp = *op0;
1786 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1787 magic number as its third argument that indicates what to do.
1788 The return value is an integer to be compared against zero. */
1789 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1791 enum qfcmp_magic {
1792 QCMP_INV = 1, /* Raise FP_INVALID on NaNs as a side effect. */
1793 QCMP_UNORD = 2,
1794 QCMP_EQ = 4,
1795 QCMP_LT = 8,
1796 QCMP_GT = 16
1798 int magic;
1799 enum rtx_code ncode;
1800 rtx ret, insns;
1802 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1803 switch (code)
1805 /* 1 = equal, 0 = not equal. Equality operators do
1806 not raise FP_INVALID when given a NaN operand. */
1807 case EQ: magic = QCMP_EQ; ncode = NE; break;
1808 case NE: magic = QCMP_EQ; ncode = EQ; break;
1809 /* isunordered() from C99. */
1810 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1811 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1812 /* Relational operators raise FP_INVALID when given
1813 a NaN operand. */
1814 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1815 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1816 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1817 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1818 /* Unordered relational operators do not raise FP_INVALID
1819 when given a NaN operand. */
1820 case UNLT: magic = QCMP_LT |QCMP_UNORD; ncode = NE; break;
1821 case UNLE: magic = QCMP_LT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1822 case UNGT: magic = QCMP_GT |QCMP_UNORD; ncode = NE; break;
1823 case UNGE: magic = QCMP_GT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
1824 /* Not supported. */
1825 case UNEQ:
1826 case LTGT:
1827 default: gcc_unreachable ();
1830 start_sequence ();
1832 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1833 *op0, TFmode, *op1, TFmode,
1834 GEN_INT (magic), DImode);
1835 cmp = gen_reg_rtx (BImode);
1836 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1837 gen_rtx_fmt_ee (ncode, BImode,
1838 ret, const0_rtx)));
1840 insns = get_insns ();
1841 end_sequence ();
1843 emit_libcall_block (insns, cmp, cmp,
1844 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1845 code = NE;
1847 else
1849 cmp = gen_reg_rtx (BImode);
1850 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1851 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1852 code = NE;
1855 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1856 *op0 = cmp;
1857 *op1 = const0_rtx;
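/* A hedged, source-level sketch of the HP-UX TFmode path above: the
   comparison becomes a call to _U_Qfcmp with a QCMP_* magic mask, and the
   result is tested against zero.  The prototype and the wrapper below are
   assumptions for illustration only, kept disabled.  */
#if 0
/* Assumed prototype; the real declaration lives in the HP-UX runtime.  */
extern long _U_Qfcmp (long double a, long double b, long magic);

static int
example_tf_less_than (long double a, long double b)
{
  /* LT uses QCMP_LT | QCMP_INV (8 | 1); a nonzero result means "true",
     matching the NE test the expander emits against const0_rtx.  */
  return _U_Qfcmp (a, b, 8 | 1) != 0;
}
#endif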
1860 /* Generate an integral vector comparison. Return true if the condition has
1861 been reversed, and so the sense of the comparison should be inverted. */
1863 static bool
1864 ia64_expand_vecint_compare (enum rtx_code code, machine_mode mode,
1865 rtx dest, rtx op0, rtx op1)
1867 bool negate = false;
1868 rtx x;
1870 /* Canonicalize the comparison to EQ, GT, GTU. */
1871 switch (code)
1873 case EQ:
1874 case GT:
1875 case GTU:
1876 break;
1878 case NE:
1879 case LE:
1880 case LEU:
1881 code = reverse_condition (code);
1882 negate = true;
1883 break;
1885 case GE:
1886 case GEU:
1887 code = reverse_condition (code);
1888 negate = true;
1889 /* FALLTHRU */
1891 case LT:
1892 case LTU:
1893 code = swap_condition (code);
1894 x = op0, op0 = op1, op1 = x;
1895 break;
1897 default:
1898 gcc_unreachable ();
1901 /* Unsigned parallel compare is not supported by the hardware. Play some
1902 tricks to turn this into a signed comparison against 0. */
1903 if (code == GTU)
1905 switch (mode)
1907 case V2SImode:
1909 rtx t1, t2, mask;
1911 /* Subtract (-(INT MAX) - 1) from both operands to make
1912 them signed. */
1913 mask = GEN_INT (0x80000000);
1914 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1915 mask = force_reg (mode, mask);
1916 t1 = gen_reg_rtx (mode);
1917 emit_insn (gen_subv2si3 (t1, op0, mask));
1918 t2 = gen_reg_rtx (mode);
1919 emit_insn (gen_subv2si3 (t2, op1, mask));
1920 op0 = t1;
1921 op1 = t2;
1922 code = GT;
1924 break;
1926 case V8QImode:
1927 case V4HImode:
1928 /* Perform a parallel unsigned saturating subtraction. */
1929 x = gen_reg_rtx (mode);
1930 emit_insn (gen_rtx_SET (VOIDmode, x,
1931 gen_rtx_US_MINUS (mode, op0, op1)));
1933 code = EQ;
1934 op0 = x;
1935 op1 = CONST0_RTX (mode);
1936 negate = !negate;
1937 break;
1939 default:
1940 gcc_unreachable ();
1944 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1945 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1947 return negate;
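/* A hedged scalar illustration of the V2SImode trick above: biasing both
   operands by 0x80000000 turns an unsigned comparison into an equivalent
   signed one.  The helper is hypothetical and kept disabled.  */
#if 0
static int
example_unsigned_gt_via_signed (unsigned int a, unsigned int b)
{
  /* Subtracting 0x80000000 modulo 2^32 is the same as flipping the sign
     bit, which is what the subv2si3 against the splatted mask does.  */
  int sa = (int) (a ^ 0x80000000u);
  int sb = (int) (b ^ 0x80000000u);
  return sa > sb;   /* equal to (a > b) for the original unsigned values */
}
#endif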
1950 /* Emit an integral vector conditional move. */
1952 void
1953 ia64_expand_vecint_cmov (rtx operands[])
1955 machine_mode mode = GET_MODE (operands[0]);
1956 enum rtx_code code = GET_CODE (operands[3]);
1957 bool negate;
1958 rtx cmp, x, ot, of;
1960 cmp = gen_reg_rtx (mode);
1961 negate = ia64_expand_vecint_compare (code, mode, cmp,
1962 operands[4], operands[5]);
1964 ot = operands[1+negate];
1965 of = operands[2-negate];
1967 if (ot == CONST0_RTX (mode))
1969 if (of == CONST0_RTX (mode))
1971 emit_move_insn (operands[0], ot);
1972 return;
1975 x = gen_rtx_NOT (mode, cmp);
1976 x = gen_rtx_AND (mode, x, of);
1977 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1979 else if (of == CONST0_RTX (mode))
1981 x = gen_rtx_AND (mode, cmp, ot);
1982 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1984 else
1986 rtx t, f;
1988 t = gen_reg_rtx (mode);
1989 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1990 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1992 f = gen_reg_rtx (mode);
1993 x = gen_rtx_NOT (mode, cmp);
1994 x = gen_rtx_AND (mode, x, operands[2-negate]);
1995 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1997 x = gen_rtx_IOR (mode, t, f);
1998 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
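/* A hedged scalar illustration of the general case above: with a mask that
   is all-ones or all-zeros per element (as the vector compare guarantees),
   the conditional move is the classic bitwise select.  Hypothetical helper,
   kept disabled.  */
#if 0
static unsigned long
example_bitwise_select (unsigned long mask, unsigned long t, unsigned long f)
{
  /* (cmp AND true-arm) OR (NOT cmp AND false-arm), as emitted above.  */
  return (mask & t) | (~mask & f);
}
#endif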
2002 /* Emit an integral vector min or max operation. Return true if all done. */
2004 bool
2005 ia64_expand_vecint_minmax (enum rtx_code code, machine_mode mode,
2006 rtx operands[])
2008 rtx xops[6];
2010 /* These four combinations are supported directly. */
2011 if (mode == V8QImode && (code == UMIN || code == UMAX))
2012 return false;
2013 if (mode == V4HImode && (code == SMIN || code == SMAX))
2014 return false;
2016 /* This combination can be implemented with only saturating subtraction. */
2017 if (mode == V4HImode && code == UMAX)
2019 rtx x, tmp = gen_reg_rtx (mode);
2021 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
2022 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
2024 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
2025 return true;
2028 /* Everything else implemented via vector comparisons. */
2029 xops[0] = operands[0];
2030 xops[4] = xops[1] = operands[1];
2031 xops[5] = xops[2] = operands[2];
2033 switch (code)
2035 case UMIN:
2036 code = LTU;
2037 break;
2038 case UMAX:
2039 code = GTU;
2040 break;
2041 case SMIN:
2042 code = LT;
2043 break;
2044 case SMAX:
2045 code = GT;
2046 break;
2047 default:
2048 gcc_unreachable ();
2050 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
2052 ia64_expand_vecint_cmov (xops);
2053 return true;
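/* A hedged scalar illustration of the V4HImode UMAX identity used above:
   max (a, b) == us_minus (a, b) + b, since the saturating difference is
   a - b when a > b and 0 otherwise.  Hypothetical helper, kept disabled.  */
#if 0
static unsigned short
example_umax_via_us_minus (unsigned short a, unsigned short b)
{
  unsigned short diff = (a > b) ? (unsigned short) (a - b) : 0; /* US_MINUS */
  return (unsigned short) (diff + b);
}
#endif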
2056 /* The vectors LO and HI each contain N halves of a double-wide vector.
2057 Reassemble either the first N/2 or the second N/2 elements. */
2059 void
2060 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
2062 machine_mode vmode = GET_MODE (lo);
2063 unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
2064 struct expand_vec_perm_d d;
2065 bool ok;
2067 d.target = gen_lowpart (vmode, out);
2068 d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
2069 d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
2070 d.vmode = vmode;
2071 d.nelt = nelt;
2072 d.one_operand_p = false;
2073 d.testing_p = false;
2075 high = (highp ? nelt / 2 : 0);
2076 for (i = 0; i < nelt / 2; ++i)
2078 d.perm[i * 2] = i + high;
2079 d.perm[i * 2 + 1] = i + high + nelt;
2082 ok = ia64_expand_vec_perm_const_1 (&d);
2083 gcc_assert (ok);
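/* A worked instance of the permutation built above, assuming V8QImode with
   HIGHP set: nelt = 8, high = 4, so the selector interleaves elements 4..7
   of op0 with elements 4..7 of op1, i.e.
   perm[] = { 4, 12, 5, 13, 6, 14, 7, 15 }.  */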
2086 /* Return a vector of the sign-extension of VEC. */
2088 static rtx
2089 ia64_unpack_sign (rtx vec, bool unsignedp)
2091 machine_mode mode = GET_MODE (vec);
2092 rtx zero = CONST0_RTX (mode);
2094 if (unsignedp)
2095 return zero;
2096 else
2098 rtx sign = gen_reg_rtx (mode);
2099 bool neg;
2101 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2102 gcc_assert (!neg);
2104 return sign;
2108 /* Emit an integral vector unpack operation. */
2110 void
2111 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2113 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2114 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
2117 /* Emit an integral vector widening sum operation. */
2119 void
2120 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2122 machine_mode wmode;
2123 rtx l, h, t, sign;
2125 sign = ia64_unpack_sign (operands[1], unsignedp);
2127 wmode = GET_MODE (operands[0]);
2128 l = gen_reg_rtx (wmode);
2129 h = gen_reg_rtx (wmode);
2131 ia64_unpack_assemble (l, operands[1], sign, false);
2132 ia64_unpack_assemble (h, operands[1], sign, true);
2134 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2135 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2136 if (t != operands[0])
2137 emit_move_insn (operands[0], t);
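/* A hedged element-wise illustration of the widening-sum expansion above
   for an unsigned V8QImode input accumulated into V4HImode (little-endian
   element order assumed).  Hypothetical helper, kept disabled.  */
#if 0
static void
example_widen_sum_u8 (unsigned short acc[4], const unsigned char x[8])
{
  int i;

  /* The low unpack contributes x[0..3] widened, the high unpack
     contributes x[4..7] widened, and both are added into the accumulator,
     matching the two add_optab calls above.  */
  for (i = 0; i < 4; i++)
    acc[i] = (unsigned short) (acc[i] + x[i] + x[i + 4]);
}
#endif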
2140 /* Emit the appropriate sequence for a call. */
2142 void
2143 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2144 int sibcall_p)
2146 rtx insn, b0;
2148 addr = XEXP (addr, 0);
2149 addr = convert_memory_address (DImode, addr);
2150 b0 = gen_rtx_REG (DImode, R_BR (0));
2152 /* ??? Should do this for functions known to bind local too. */
2153 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2155 if (sibcall_p)
2156 insn = gen_sibcall_nogp (addr);
2157 else if (! retval)
2158 insn = gen_call_nogp (addr, b0);
2159 else
2160 insn = gen_call_value_nogp (retval, addr, b0);
2161 insn = emit_call_insn (insn);
2163 else
2165 if (sibcall_p)
2166 insn = gen_sibcall_gp (addr);
2167 else if (! retval)
2168 insn = gen_call_gp (addr, b0);
2169 else
2170 insn = gen_call_value_gp (retval, addr, b0);
2171 insn = emit_call_insn (insn);
2173 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2176 if (sibcall_p)
2177 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2179 if (TARGET_ABI_OPEN_VMS)
2180 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2181 gen_rtx_REG (DImode, GR_REG (25)));
2184 static void
2185 reg_emitted (enum ia64_frame_regs r)
2187 if (emitted_frame_related_regs[r] == 0)
2188 emitted_frame_related_regs[r] = current_frame_info.r[r];
2189 else
2190 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2193 static int
2194 get_reg (enum ia64_frame_regs r)
2196 reg_emitted (r);
2197 return current_frame_info.r[r];
2200 static bool
2201 is_emitted (int regno)
2203 unsigned int r;
2205 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2206 if (emitted_frame_related_regs[r] == regno)
2207 return true;
2208 return false;
2211 void
2212 ia64_reload_gp (void)
2214 rtx tmp;
2216 if (current_frame_info.r[reg_save_gp])
2218 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2220 else
2222 HOST_WIDE_INT offset;
2223 rtx offset_r;
2225 offset = (current_frame_info.spill_cfa_off
2226 + current_frame_info.spill_size);
2227 if (frame_pointer_needed)
2229 tmp = hard_frame_pointer_rtx;
2230 offset = -offset;
2232 else
2234 tmp = stack_pointer_rtx;
2235 offset = current_frame_info.total_size - offset;
2238 offset_r = GEN_INT (offset);
2239 if (satisfies_constraint_I (offset_r))
2240 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2241 else
2243 emit_move_insn (pic_offset_table_rtx, offset_r);
2244 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2245 pic_offset_table_rtx, tmp));
2248 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2251 emit_move_insn (pic_offset_table_rtx, tmp);
2254 void
2255 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2256 rtx scratch_b, int noreturn_p, int sibcall_p)
2258 rtx insn;
2259 bool is_desc = false;
2261 /* If we find we're calling through a register, then we're actually
2262 calling through a descriptor, so load up the values. */
2263 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2265 rtx tmp;
2266 bool addr_dead_p;
2268 /* ??? We are currently constrained to *not* use peep2, because
2269 we can legitimately change the global lifetime of the GP
2270 (in the form of killing where previously live). This is
2271 because a call through a descriptor doesn't use the previous
2272 value of the GP, while a direct call does, and we do not
2273 commit to either form until the split here.
2275 That said, this means that we lack precise life info for
2276 whether ADDR is dead after this call. This is not terribly
2277 important, since we can fix things up essentially for free
2278 with the POST_DEC below, but it's nice to not use it when we
2279 can immediately tell it's not necessary. */
2280 addr_dead_p = ((noreturn_p || sibcall_p
2281 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2282 REGNO (addr)))
2283 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2285 /* Load the code address into scratch_b. */
2286 tmp = gen_rtx_POST_INC (Pmode, addr);
2287 tmp = gen_rtx_MEM (Pmode, tmp);
2288 emit_move_insn (scratch_r, tmp);
2289 emit_move_insn (scratch_b, scratch_r);
2291 /* Load the GP address. If ADDR is not dead here, then we must
2292 revert the change made above via the POST_INCREMENT. */
2293 if (!addr_dead_p)
2294 tmp = gen_rtx_POST_DEC (Pmode, addr);
2295 else
2296 tmp = addr;
2297 tmp = gen_rtx_MEM (Pmode, tmp);
2298 emit_move_insn (pic_offset_table_rtx, tmp);
2300 is_desc = true;
2301 addr = scratch_b;
2304 if (sibcall_p)
2305 insn = gen_sibcall_nogp (addr);
2306 else if (retval)
2307 insn = gen_call_value_nogp (retval, addr, retaddr);
2308 else
2309 insn = gen_call_nogp (addr, retaddr);
2310 emit_call_insn (insn);
2312 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2313 ia64_reload_gp ();
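/* A hedged sketch of the data layout assumed by the descriptor handling
   above: an ia64 function descriptor is two 8-byte words, the code address
   followed by the callee's GP.  The struct name is hypothetical and the
   snippet is kept disabled.  */
#if 0
struct example_ia64_fdesc
{
  unsigned long code_address;   /* fetched first, ends up in scratch_b */
  unsigned long gp_value;       /* fetched second, ends up in r1 (the GP) */
};
#endif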
2316 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2318 This differs from the generic code in that we know about the zero-extending
2319 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2320 also know that ld.acq+cmpxchg.rel equals a full barrier.
2322 The loop we want to generate looks like
2324 cmp_reg = mem;
2325 label:
2326 old_reg = cmp_reg;
2327 new_reg = cmp_reg op val;
2328 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2329 if (cmp_reg != old_reg)
2330 goto label;
2332 Note that we only do the plain load from memory once. Subsequent
2333 iterations use the value loaded by the compare-and-swap pattern. */
2335 void
2336 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2337 rtx old_dst, rtx new_dst, enum memmodel model)
2339 machine_mode mode = GET_MODE (mem);
2340 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2341 enum insn_code icode;
2343 /* Special case for using fetchadd. */
2344 if ((mode == SImode || mode == DImode)
2345 && (code == PLUS || code == MINUS)
2346 && fetchadd_operand (val, mode))
2348 if (code == MINUS)
2349 val = GEN_INT (-INTVAL (val));
2351 if (!old_dst)
2352 old_dst = gen_reg_rtx (mode);
2354 switch (model)
2356 case MEMMODEL_ACQ_REL:
2357 case MEMMODEL_SEQ_CST:
2358 emit_insn (gen_memory_barrier ());
2359 /* FALLTHRU */
2360 case MEMMODEL_RELAXED:
2361 case MEMMODEL_ACQUIRE:
2362 case MEMMODEL_CONSUME:
2363 if (mode == SImode)
2364 icode = CODE_FOR_fetchadd_acq_si;
2365 else
2366 icode = CODE_FOR_fetchadd_acq_di;
2367 break;
2368 case MEMMODEL_RELEASE:
2369 if (mode == SImode)
2370 icode = CODE_FOR_fetchadd_rel_si;
2371 else
2372 icode = CODE_FOR_fetchadd_rel_di;
2373 break;
2375 default:
2376 gcc_unreachable ();
2379 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2381 if (new_dst)
2383 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2384 true, OPTAB_WIDEN);
2385 if (new_reg != new_dst)
2386 emit_move_insn (new_dst, new_reg);
2388 return;
2391 /* Because of the volatile mem read, we get an ld.acq, which is the
2392 front half of the full barrier. The end half is the cmpxchg.rel.
2393 For relaxed and release memory models, we don't need this. But we
2394 also don't bother trying to prevent it either. */
2395 gcc_assert (model == MEMMODEL_RELAXED
2396 || model == MEMMODEL_RELEASE
2397 || MEM_VOLATILE_P (mem));
2399 old_reg = gen_reg_rtx (DImode);
2400 cmp_reg = gen_reg_rtx (DImode);
2401 label = gen_label_rtx ();
2403 if (mode != DImode)
2405 val = simplify_gen_subreg (DImode, val, mode, 0);
2406 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2408 else
2409 emit_move_insn (cmp_reg, mem);
2411 emit_label (label);
2413 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2414 emit_move_insn (old_reg, cmp_reg);
2415 emit_move_insn (ar_ccv, cmp_reg);
2417 if (old_dst)
2418 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2420 new_reg = cmp_reg;
2421 if (code == NOT)
2423 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2424 true, OPTAB_DIRECT);
2425 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2427 else
2428 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2429 true, OPTAB_DIRECT);
2431 if (mode != DImode)
2432 new_reg = gen_lowpart (mode, new_reg);
2433 if (new_dst)
2434 emit_move_insn (new_dst, new_reg);
2436 switch (model)
2438 case MEMMODEL_RELAXED:
2439 case MEMMODEL_ACQUIRE:
2440 case MEMMODEL_CONSUME:
2441 switch (mode)
2443 case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
2444 case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
2445 case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
2446 case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
2447 default:
2448 gcc_unreachable ();
2450 break;
2452 case MEMMODEL_RELEASE:
2453 case MEMMODEL_ACQ_REL:
2454 case MEMMODEL_SEQ_CST:
2455 switch (mode)
2457 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2458 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2459 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2460 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2461 default:
2462 gcc_unreachable ();
2464 break;
2466 default:
2467 gcc_unreachable ();
2470 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2472 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
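/* A hedged example of source-level code that reaches the cmpxchg retry loop
   above: any read-modify-write without a matching fetchadd form, such as an
   atomic OR.  The function is hypothetical and kept disabled.  */
#if 0
static unsigned int
example_atomic_fetch_or (unsigned int *p, unsigned int bits)
{
  /* Expands to the ld.acq + cmpxchg.rel loop sketched in the comment
     before ia64_expand_atomic_op.  */
  return __atomic_fetch_or (p, bits, __ATOMIC_SEQ_CST);
}
#endif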
2475 /* Begin the assembly file. */
2477 static void
2478 ia64_file_start (void)
2480 default_file_start ();
2481 emit_safe_across_calls ();
2484 void
2485 emit_safe_across_calls (void)
2487 unsigned int rs, re;
2488 int out_state;
2490 rs = 1;
2491 out_state = 0;
2492 while (1)
2494 while (rs < 64 && call_used_regs[PR_REG (rs)])
2495 rs++;
2496 if (rs >= 64)
2497 break;
2498 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2499 continue;
2500 if (out_state == 0)
2502 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2503 out_state = 1;
2505 else
2506 fputc (',', asm_out_file);
2507 if (re == rs + 1)
2508 fprintf (asm_out_file, "p%u", rs);
2509 else
2510 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2511 rs = re + 1;
2513 if (out_state)
2514 fputc ('\n', asm_out_file);
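/* A hedged example of the directive emitted above.  With the usual ia64
   partitioning of call-used predicate registers, the output has roughly
   this shape (the exact ranges depend on call_used_regs):

	.pred.safe_across_calls p1-p5,p16-p63
*/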
2517 /* Globalize a declaration. */
2519 static void
2520 ia64_globalize_decl_name (FILE * stream, tree decl)
2522 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2523 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2524 if (version_attr)
2526 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2527 const char *p = TREE_STRING_POINTER (v);
2528 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2530 targetm.asm_out.globalize_label (stream, name);
2531 if (TREE_CODE (decl) == FUNCTION_DECL)
2532 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
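/* A hedged example of the HP-UX "version_id" attribute handled above; it
   causes the .alias directive carrying the version string to be emitted
   alongside the globalized symbol.  The declaration is hypothetical and
   kept disabled.  */
#if 0
extern int example_versioned_api (int)
     __attribute__ ((version_id ("20040821")));
#endif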
2535 /* Helper function for ia64_compute_frame_size: find an appropriate general
2536 register to spill the special register R to. current_frame_info.gr_used_mask
2537 tracks the bits in GR0 to GR31 that have already been allocated by this
2538 routine. TRY_LOCALS is true if we should attempt to locate a local regnum. */
2540 static int
2541 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2543 int regno;
2545 if (emitted_frame_related_regs[r] != 0)
2547 regno = emitted_frame_related_regs[r];
2548 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2549 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2550 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2551 else if (crtl->is_leaf
2552 && regno >= GR_REG (1) && regno <= GR_REG (31))
2553 current_frame_info.gr_used_mask |= 1 << regno;
2555 return regno;
2558 /* If this is a leaf function, first try an otherwise unused
2559 call-clobbered register. */
2560 if (crtl->is_leaf)
2562 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2563 if (! df_regs_ever_live_p (regno)
2564 && call_used_regs[regno]
2565 && ! fixed_regs[regno]
2566 && ! global_regs[regno]
2567 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2568 && ! is_emitted (regno))
2570 current_frame_info.gr_used_mask |= 1 << regno;
2571 return regno;
2575 if (try_locals)
2577 regno = current_frame_info.n_local_regs;
2578 /* If there is a frame pointer, then we can't use loc79, because
2579 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2580 reg_name switching code in ia64_expand_prologue. */
2581 while (regno < (80 - frame_pointer_needed))
2582 if (! is_emitted (LOC_REG (regno++)))
2584 current_frame_info.n_local_regs = regno;
2585 return LOC_REG (regno - 1);
2589 /* Failed to find a general register to spill to. Must use stack. */
2590 return 0;
2593 /* In order to make for nice schedules, we try to allocate every temporary
2594 to a different register. We must of course stay away from call-saved,
2595 fixed, and global registers. We must also stay away from registers
2596 allocated in current_frame_info.gr_used_mask, since those include regs
2597 used all through the prologue.
2599 Any register allocated here must be used immediately. The idea is to
2600 aid scheduling, not to solve data flow problems. */
2602 static int last_scratch_gr_reg;
2604 static int
2605 next_scratch_gr_reg (void)
2607 int i, regno;
2609 for (i = 0; i < 32; ++i)
2611 regno = (last_scratch_gr_reg + i + 1) & 31;
2612 if (call_used_regs[regno]
2613 && ! fixed_regs[regno]
2614 && ! global_regs[regno]
2615 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2617 last_scratch_gr_reg = regno;
2618 return regno;
2622 /* There must be _something_ available. */
2623 gcc_unreachable ();
2626 /* Helper function for ia64_compute_frame_size, called through
2627 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2629 static void
2630 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2632 unsigned int regno = REGNO (reg);
2633 if (regno < 32)
2635 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2636 for (i = 0; i < n; ++i)
2637 current_frame_info.gr_used_mask |= 1 << (regno + i);
2642 /* Compute the frame layout for the current function and record it in
2643 current_frame_info. SIZE is the number of bytes of space
2644 needed for local variables. */
2646 static void
2647 ia64_compute_frame_size (HOST_WIDE_INT size)
2649 HOST_WIDE_INT total_size;
2650 HOST_WIDE_INT spill_size = 0;
2651 HOST_WIDE_INT extra_spill_size = 0;
2652 HOST_WIDE_INT pretend_args_size;
2653 HARD_REG_SET mask;
2654 int n_spilled = 0;
2655 int spilled_gr_p = 0;
2656 int spilled_fr_p = 0;
2657 unsigned int regno;
2658 int min_regno;
2659 int max_regno;
2660 int i;
2662 if (current_frame_info.initialized)
2663 return;
2665 memset (&current_frame_info, 0, sizeof current_frame_info);
2666 CLEAR_HARD_REG_SET (mask);
2668 /* Don't allocate scratches to the return register. */
2669 diddle_return_value (mark_reg_gr_used_mask, NULL);
2671 /* Don't allocate scratches to the EH scratch registers. */
2672 if (cfun->machine->ia64_eh_epilogue_sp)
2673 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2674 if (cfun->machine->ia64_eh_epilogue_bsp)
2675 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2677 /* Static stack checking uses r2 and r3. */
2678 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
2679 current_frame_info.gr_used_mask |= 0xc;
2681 /* Find the size of the register stack frame. We have only 80 local
2682 registers, because we reserve 8 for the inputs and 8 for the
2683 outputs. */
2685 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2686 since we'll be adjusting that down later. */
2687 regno = LOC_REG (78) + ! frame_pointer_needed;
2688 for (; regno >= LOC_REG (0); regno--)
2689 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2690 break;
2691 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2693 /* For functions marked with the syscall_linkage attribute, we must mark
2694 all eight input registers as in use, so that locals aren't visible to
2695 the caller. */
2697 if (cfun->machine->n_varargs > 0
2698 || lookup_attribute ("syscall_linkage",
2699 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2700 current_frame_info.n_input_regs = 8;
2701 else
2703 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2704 if (df_regs_ever_live_p (regno))
2705 break;
2706 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2709 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2710 if (df_regs_ever_live_p (regno))
2711 break;
2712 i = regno - OUT_REG (0) + 1;
2714 #ifndef PROFILE_HOOK
2715 /* When -p profiling, we need one output register for the mcount argument.
2716 Likewise for -a profiling for the bb_init_func argument. For -ax
2717 profiling, we need two output registers for the two bb_init_trace_func
2718 arguments. */
2719 if (crtl->profile)
2720 i = MAX (i, 1);
2721 #endif
2722 current_frame_info.n_output_regs = i;
2724 /* ??? No rotating register support yet. */
2725 current_frame_info.n_rotate_regs = 0;
2727 /* Discover which registers need spilling, and how much room that
2728 will take. Begin with floating point and general registers,
2729 which will always wind up on the stack. */
2731 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2732 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2734 SET_HARD_REG_BIT (mask, regno);
2735 spill_size += 16;
2736 n_spilled += 1;
2737 spilled_fr_p = 1;
2740 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2741 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2743 SET_HARD_REG_BIT (mask, regno);
2744 spill_size += 8;
2745 n_spilled += 1;
2746 spilled_gr_p = 1;
2749 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2750 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2752 SET_HARD_REG_BIT (mask, regno);
2753 spill_size += 8;
2754 n_spilled += 1;
2757 /* Now come all special registers that might get saved in other
2758 general registers. */
2760 if (frame_pointer_needed)
2762 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2763 /* If we did not get a register, then we take LOC79. This is guaranteed
2764 to be free, even if regs_ever_live is already set, because this is
2765 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2766 as we don't count loc79 above. */
2767 if (current_frame_info.r[reg_fp] == 0)
2769 current_frame_info.r[reg_fp] = LOC_REG (79);
2770 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2774 if (! crtl->is_leaf)
2776 /* Emit a save of BR0 if we call other functions. Do this even
2777 if this function doesn't return, as EH depends on this to be
2778 able to unwind the stack. */
2779 SET_HARD_REG_BIT (mask, BR_REG (0));
2781 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2782 if (current_frame_info.r[reg_save_b0] == 0)
2784 extra_spill_size += 8;
2785 n_spilled += 1;
2788 /* Similarly for ar.pfs. */
2789 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2790 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2791 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2793 extra_spill_size += 8;
2794 n_spilled += 1;
2797 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2798 registers are clobbered, so we fall back to the stack. */
2799 current_frame_info.r[reg_save_gp]
2800 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2801 if (current_frame_info.r[reg_save_gp] == 0)
2803 SET_HARD_REG_BIT (mask, GR_REG (1));
2804 spill_size += 8;
2805 n_spilled += 1;
2808 else
2810 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2812 SET_HARD_REG_BIT (mask, BR_REG (0));
2813 extra_spill_size += 8;
2814 n_spilled += 1;
2817 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2819 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2820 current_frame_info.r[reg_save_ar_pfs]
2821 = find_gr_spill (reg_save_ar_pfs, 1);
2822 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2824 extra_spill_size += 8;
2825 n_spilled += 1;
2830 /* Unwind descriptor hackery: things are most efficient if we allocate
2831 consecutive GR save registers for RP, PFS, FP in that order. However,
2832 it is absolutely critical that FP get the only hard register that's
2833 guaranteed to be free, so we allocated it first. If all three did
2834 happen to be allocated hard regs, and are consecutive, rearrange them
2835 into the preferred order now.
2837 If we have already emitted code for any of those registers,
2838 then it's already too late to change. */
2839 min_regno = MIN (current_frame_info.r[reg_fp],
2840 MIN (current_frame_info.r[reg_save_b0],
2841 current_frame_info.r[reg_save_ar_pfs]));
2842 max_regno = MAX (current_frame_info.r[reg_fp],
2843 MAX (current_frame_info.r[reg_save_b0],
2844 current_frame_info.r[reg_save_ar_pfs]));
2845 if (min_regno > 0
2846 && min_regno + 2 == max_regno
2847 && (current_frame_info.r[reg_fp] == min_regno + 1
2848 || current_frame_info.r[reg_save_b0] == min_regno + 1
2849 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2850 && (emitted_frame_related_regs[reg_save_b0] == 0
2851 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2852 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2853 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2854 && (emitted_frame_related_regs[reg_fp] == 0
2855 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2857 current_frame_info.r[reg_save_b0] = min_regno;
2858 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2859 current_frame_info.r[reg_fp] = min_regno + 2;
2862 /* See if we need to store the predicate register block. */
2863 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2864 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2865 break;
2866 if (regno <= PR_REG (63))
2868 SET_HARD_REG_BIT (mask, PR_REG (0));
2869 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2870 if (current_frame_info.r[reg_save_pr] == 0)
2872 extra_spill_size += 8;
2873 n_spilled += 1;
2876 /* ??? Mark them all as used so that register renaming and such
2877 are free to use them. */
2878 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2879 df_set_regs_ever_live (regno, true);
2882 /* If we're forced to use st8.spill, we're forced to save and restore
2883 ar.unat as well. The check for existing liveness allows inline asm
2884 to touch ar.unat. */
2885 if (spilled_gr_p || cfun->machine->n_varargs
2886 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2888 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2889 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2890 current_frame_info.r[reg_save_ar_unat]
2891 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2892 if (current_frame_info.r[reg_save_ar_unat] == 0)
2894 extra_spill_size += 8;
2895 n_spilled += 1;
2899 if (df_regs_ever_live_p (AR_LC_REGNUM))
2901 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2902 current_frame_info.r[reg_save_ar_lc]
2903 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2904 if (current_frame_info.r[reg_save_ar_lc] == 0)
2906 extra_spill_size += 8;
2907 n_spilled += 1;
2911 /* If we have an odd number of words of pretend arguments written to
2912 the stack, then the FR save area will be unaligned. We round the
2913 size of this area up to keep things 16 byte aligned. */
2914 if (spilled_fr_p)
2915 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2916 else
2917 pretend_args_size = crtl->args.pretend_args_size;
2919 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2920 + crtl->outgoing_args_size);
2921 total_size = IA64_STACK_ALIGN (total_size);
2923 /* We always use the 16-byte scratch area provided by the caller, but
2924 if we are a leaf function, there's no one to which we need to provide
2925 a scratch area. However, if the function allocates dynamic stack space,
2926 the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
2927 so we need to cope. */
2928 if (crtl->is_leaf && !cfun->calls_alloca)
2929 total_size = MAX (0, total_size - 16);
2931 current_frame_info.total_size = total_size;
2932 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2933 current_frame_info.spill_size = spill_size;
2934 current_frame_info.extra_spill_size = extra_spill_size;
2935 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2936 current_frame_info.n_spilled = n_spilled;
2937 current_frame_info.initialized = reload_completed;
2940 /* Worker function for TARGET_CAN_ELIMINATE. */
2942 bool
2943 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2945 return (to == BR_REG (0) ? crtl->is_leaf : true);
2948 /* Compute the initial difference between the specified pair of registers. */
2950 HOST_WIDE_INT
2951 ia64_initial_elimination_offset (int from, int to)
2953 HOST_WIDE_INT offset;
2955 ia64_compute_frame_size (get_frame_size ());
2956 switch (from)
2958 case FRAME_POINTER_REGNUM:
2959 switch (to)
2961 case HARD_FRAME_POINTER_REGNUM:
2962 offset = -current_frame_info.total_size;
2963 if (!crtl->is_leaf || cfun->calls_alloca)
2964 offset += 16 + crtl->outgoing_args_size;
2965 break;
2967 case STACK_POINTER_REGNUM:
2968 offset = 0;
2969 if (!crtl->is_leaf || cfun->calls_alloca)
2970 offset += 16 + crtl->outgoing_args_size;
2971 break;
2973 default:
2974 gcc_unreachable ();
2976 break;
2978 case ARG_POINTER_REGNUM:
2979 /* Arguments start above the 16 byte save area, unless stdarg
2980 in which case we store through the 16 byte save area. */
2981 switch (to)
2983 case HARD_FRAME_POINTER_REGNUM:
2984 offset = 16 - crtl->args.pretend_args_size;
2985 break;
2987 case STACK_POINTER_REGNUM:
2988 offset = (current_frame_info.total_size
2989 + 16 - crtl->args.pretend_args_size);
2990 break;
2992 default:
2993 gcc_unreachable ();
2995 break;
2997 default:
2998 gcc_unreachable ();
3001 return offset;
3004 /* If there are more than a trivial number of register spills, we use
3005 two interleaved iterators so that we can get two memory references
3006 per insn group.
3008 In order to simplify things in the prologue and epilogue expanders,
3009 we use helper functions to fix up the memory references after the
3010 fact with the appropriate offsets to a POST_MODIFY memory mode.
3011 The following data structure tracks the state of the two iterators
3012 while insns are being emitted. */
3014 struct spill_fill_data
3016 rtx_insn *init_after; /* point at which to emit initializations */
3017 rtx init_reg[2]; /* initial base register */
3018 rtx iter_reg[2]; /* the iterator registers */
3019 rtx *prev_addr[2]; /* address of last memory use */
3020 rtx_insn *prev_insn[2]; /* the insn corresponding to prev_addr */
3021 HOST_WIDE_INT prev_off[2]; /* last offset */
3022 int n_iter; /* number of iterators in use */
3023 int next_iter; /* next iterator to use */
3024 unsigned int save_gr_used_mask;
3027 static struct spill_fill_data spill_fill_data;
3029 static void
3030 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
3032 int i;
3034 spill_fill_data.init_after = get_last_insn ();
3035 spill_fill_data.init_reg[0] = init_reg;
3036 spill_fill_data.init_reg[1] = init_reg;
3037 spill_fill_data.prev_addr[0] = NULL;
3038 spill_fill_data.prev_addr[1] = NULL;
3039 spill_fill_data.prev_insn[0] = NULL;
3040 spill_fill_data.prev_insn[1] = NULL;
3041 spill_fill_data.prev_off[0] = cfa_off;
3042 spill_fill_data.prev_off[1] = cfa_off;
3043 spill_fill_data.next_iter = 0;
3044 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3046 spill_fill_data.n_iter = 1 + (n_spills > 2);
3047 for (i = 0; i < spill_fill_data.n_iter; ++i)
3049 int regno = next_scratch_gr_reg ();
3050 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3051 current_frame_info.gr_used_mask |= 1 << regno;
3055 static void
3056 finish_spill_pointers (void)
3058 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3061 static rtx
3062 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3064 int iter = spill_fill_data.next_iter;
3065 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3066 rtx disp_rtx = GEN_INT (disp);
3067 rtx mem;
3069 if (spill_fill_data.prev_addr[iter])
3071 if (satisfies_constraint_N (disp_rtx))
3073 *spill_fill_data.prev_addr[iter]
3074 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3075 gen_rtx_PLUS (DImode,
3076 spill_fill_data.iter_reg[iter],
3077 disp_rtx));
3078 add_reg_note (spill_fill_data.prev_insn[iter],
3079 REG_INC, spill_fill_data.iter_reg[iter]);
3081 else
3083 /* ??? Could use register post_modify for loads. */
3084 if (!satisfies_constraint_I (disp_rtx))
3086 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3087 emit_move_insn (tmp, disp_rtx);
3088 disp_rtx = tmp;
3090 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3091 spill_fill_data.iter_reg[iter], disp_rtx));
3094 /* Micro-optimization: if we've created a frame pointer, it's at
3095 CFA 0, which may allow the real iterator to be initialized lower,
3096 slightly increasing parallelism. Also, if there are few saves
3097 it may eliminate the iterator entirely. */
3098 else if (disp == 0
3099 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3100 && frame_pointer_needed)
3102 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3103 set_mem_alias_set (mem, get_varargs_alias_set ());
3104 return mem;
3106 else
3108 rtx seq;
3109 rtx_insn *insn;
3111 if (disp == 0)
3112 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3113 spill_fill_data.init_reg[iter]);
3114 else
3116 start_sequence ();
3118 if (!satisfies_constraint_I (disp_rtx))
3120 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3121 emit_move_insn (tmp, disp_rtx);
3122 disp_rtx = tmp;
3125 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3126 spill_fill_data.init_reg[iter],
3127 disp_rtx));
3129 seq = get_insns ();
3130 end_sequence ();
3133 /* Careful for being the first insn in a sequence. */
3134 if (spill_fill_data.init_after)
3135 insn = emit_insn_after (seq, spill_fill_data.init_after);
3136 else
3138 rtx_insn *first = get_insns ();
3139 if (first)
3140 insn = emit_insn_before (seq, first);
3141 else
3142 insn = emit_insn (seq);
3144 spill_fill_data.init_after = insn;
3147 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3149 /* ??? Not all of the spills are for varargs, but some of them are.
3150 The rest of the spills belong in an alias set of their own. But
3151 it doesn't actually hurt to include them here. */
3152 set_mem_alias_set (mem, get_varargs_alias_set ());
3154 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3155 spill_fill_data.prev_off[iter] = cfa_off;
3157 if (++iter >= spill_fill_data.n_iter)
3158 iter = 0;
3159 spill_fill_data.next_iter = iter;
3161 return mem;
3164 static void
3165 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3166 rtx frame_reg)
3168 int iter = spill_fill_data.next_iter;
3169 rtx mem;
3170 rtx_insn *insn;
3172 mem = spill_restore_mem (reg, cfa_off);
3173 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3174 spill_fill_data.prev_insn[iter] = insn;
3176 if (frame_reg)
3178 rtx base;
3179 HOST_WIDE_INT off;
3181 RTX_FRAME_RELATED_P (insn) = 1;
3183 /* Don't even pretend that the unwind code can intuit its way
3184 through a pair of interleaved post_modify iterators. Just
3185 provide the correct answer. */
3187 if (frame_pointer_needed)
3189 base = hard_frame_pointer_rtx;
3190 off = - cfa_off;
3192 else
3194 base = stack_pointer_rtx;
3195 off = current_frame_info.total_size - cfa_off;
3198 add_reg_note (insn, REG_CFA_OFFSET,
3199 gen_rtx_SET (VOIDmode,
3200 gen_rtx_MEM (GET_MODE (reg),
3201 plus_constant (Pmode,
3202 base, off)),
3203 frame_reg));
3207 static void
3208 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3210 int iter = spill_fill_data.next_iter;
3211 rtx_insn *insn;
3213 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3214 GEN_INT (cfa_off)));
3215 spill_fill_data.prev_insn[iter] = insn;
3218 /* Wrapper functions that discard the CONST_INT spill offset. These
3219 exist so that we can give gr_spill/gr_fill the offset they need and
3220 use a consistent function interface. */
3222 static rtx
3223 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3225 return gen_movdi (dest, src);
3228 static rtx
3229 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3231 return gen_fr_spill (dest, src);
3234 static rtx
3235 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3237 return gen_fr_restore (dest, src);
3240 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
3242 /* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
3243 #define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
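/* A worked instance of the macro above: for N = 96 stacked registers,
   BACKING_STORE_SIZE (96) = (96 + 96/63 + 1) * 8 = (96 + 1 + 1) * 8 = 784
   bytes; the N/63 + 1 extra slots cover the RSE's NaT collection spills.  */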
3245 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
3246 inclusive. These are offsets from the current stack pointer. BS_SIZE
3247 is the size of the backing store. ??? This clobbers r2 and r3. */
3249 static void
3250 ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
3251 int bs_size)
3253 rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
3254 rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
3255 rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
3257 /* On the IA-64 there is a second stack in memory, namely the Backing Store
3258 of the Register Stack Engine. We also need to probe it after checking
3259 that the 2 stacks don't overlap. */
3260 emit_insn (gen_bsp_value (r3));
3261 emit_move_insn (r2, GEN_INT (-(first + size)));
3263 /* Compare current value of BSP and SP registers. */
3264 emit_insn (gen_rtx_SET (VOIDmode, p6,
3265 gen_rtx_fmt_ee (LTU, BImode,
3266 r3, stack_pointer_rtx)));
3268 /* Compute the address of the probe for the Backing Store (which grows
3269 towards higher addresses). We probe only at the first offset of
3270 the next page because some OSes (e.g. Linux/ia64) only extend the
3271 backing store when this specific address is hit (but generate a SEGV
3272 on other addresses). Page size is the worst case (4KB). The reserve
3273 size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
3274 Also compute the address of the last probe for the memory stack
3275 (which grows towards lower addresses). */
3276 emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (Pmode, r3, 4095)));
3277 emit_insn (gen_rtx_SET (VOIDmode, r2,
3278 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3280 /* Compare them and raise SEGV if the former has topped the latter. */
3281 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3282 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3283 gen_rtx_SET (VOIDmode, p6,
3284 gen_rtx_fmt_ee (GEU, BImode,
3285 r3, r2))));
3286 emit_insn (gen_rtx_SET (VOIDmode,
3287 gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
3288 const0_rtx),
3289 const0_rtx));
3290 emit_insn (gen_rtx_COND_EXEC (VOIDmode,
3291 gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
3292 gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
3293 GEN_INT (11))));
3295 /* Probe the Backing Store if necessary. */
3296 if (bs_size > 0)
3297 emit_stack_probe (r3);
3299 /* Probe the memory stack if necessary. */
3300 if (size == 0)
3303 /* See if we have a constant small number of probes to generate. If so,
3304 that's the easy case. */
3305 else if (size <= PROBE_INTERVAL)
3306 emit_stack_probe (r2);
3308 /* The run-time loop is made up of 8 insns in the generic case while this
3309 compile-time loop is made up of 5+2*(n-2) insns for n # of intervals. */
3310 else if (size <= 4 * PROBE_INTERVAL)
3312 HOST_WIDE_INT i;
3314 emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
3315 emit_insn (gen_rtx_SET (VOIDmode, r2,
3316 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3317 emit_stack_probe (r2);
3319 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
3320 it exceeds SIZE. If only two probes are needed, this will not
3321 generate any code. Then probe at FIRST + SIZE. */
3322 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
3324 emit_insn (gen_rtx_SET (VOIDmode, r2,
3325 plus_constant (Pmode, r2, -PROBE_INTERVAL)));
3326 emit_stack_probe (r2);
3329 emit_insn (gen_rtx_SET (VOIDmode, r2,
3330 plus_constant (Pmode, r2,
3331 (i - PROBE_INTERVAL) - size)));
3332 emit_stack_probe (r2);
3335 /* Otherwise, do the same as above, but in a loop. Note that we must be
3336 extra careful with variables wrapping around because we might be at
3337 the very top (or the very bottom) of the address space and we have
3338 to be able to handle this case properly; in particular, we use an
3339 equality test for the loop condition. */
3340 else
3342 HOST_WIDE_INT rounded_size;
3344 emit_move_insn (r2, GEN_INT (-first));
3347 /* Step 1: round SIZE to the previous multiple of the interval. */
3349 rounded_size = size & -PROBE_INTERVAL;
3352 /* Step 2: compute initial and final value of the loop counter. */
3354 /* TEST_ADDR = SP + FIRST. */
3355 emit_insn (gen_rtx_SET (VOIDmode, r2,
3356 gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
3358 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
3359 if (rounded_size > (1 << 21))
3361 emit_move_insn (r3, GEN_INT (-rounded_size));
3362 emit_insn (gen_rtx_SET (VOIDmode, r3, gen_rtx_PLUS (Pmode, r2, r3)));
3364 else
3365 emit_insn (gen_rtx_SET (VOIDmode, r3,
3366 gen_rtx_PLUS (Pmode, r2,
3367 GEN_INT (-rounded_size))));
3370 /* Step 3: the loop
3372 while (TEST_ADDR != LAST_ADDR)
3374 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
3375 probe at TEST_ADDR
3378 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
3379 until it is equal to ROUNDED_SIZE. */
3381 emit_insn (gen_probe_stack_range (r2, r2, r3));
3384 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
3385 that SIZE is equal to ROUNDED_SIZE. */
3387 /* TEMP = SIZE - ROUNDED_SIZE. */
3388 if (size != rounded_size)
3390 emit_insn (gen_rtx_SET (VOIDmode, r2,
3391 plus_constant (Pmode, r2,
3392 rounded_size - size)));
3393 emit_stack_probe (r2);
3397 /* Make sure nothing is scheduled before we are done. */
3398 emit_insn (gen_blockage ());
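/* A hedged example of a function whose frame is large enough that, with
   -fstack-check, the probing sequence above is emitted in its prologue.
   Sizes and names are hypothetical; the snippet is kept disabled.  */
#if 0
void
example_big_frame (void)
{
  volatile char buf[256 * 1024];   /* far beyond STACK_CHECK_PROTECT */
  buf[0] = 0;
}
#endif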
3401 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
3402 absolute addresses. */
3404 const char *
3405 output_probe_stack_range (rtx reg1, rtx reg2)
3407 static int labelno = 0;
3408 char loop_lab[32], end_lab[32];
3409 rtx xops[3];
3411 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
3412 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
3414 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
3416 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
3417 xops[0] = reg1;
3418 xops[1] = reg2;
3419 xops[2] = gen_rtx_REG (BImode, PR_REG (6));
3420 output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
3421 fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [REGNO (xops[2])]);
3422 assemble_name_raw (asm_out_file, end_lab);
3423 fputc ('\n', asm_out_file);
3425 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
3426 xops[1] = GEN_INT (-PROBE_INTERVAL);
3427 output_asm_insn ("addl %0 = %1, %0", xops);
3428 fputs ("\t;;\n", asm_out_file);
3430 /* Probe at TEST_ADDR and branch. */
3431 output_asm_insn ("probe.w.fault %0, 0", xops);
3432 fprintf (asm_out_file, "\tbr ");
3433 assemble_name_raw (asm_out_file, loop_lab);
3434 fputc ('\n', asm_out_file);
3436 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
3438 return "";
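/* A hedged sketch of the assembly produced by the routine above (label
   names follow the LPSRL/LPSRE pattern used in the code; the probe
   interval shown assumes the default 4096-byte value):

.LPSRL0:
	cmp.eq p6, p7 = r2, r3
	(p6) br.cond.dpnt .LPSRE0
	addl r2 = -4096, r2
	;;
	probe.w.fault r2, 0
	br .LPSRL0
.LPSRE0:
*/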
3441 /* Called after register allocation to add any instructions needed for the
3442 prologue. Using a prologue insn is favored compared to putting all of the
3443 instructions in output_function_prologue(), since it allows the scheduler
3444 to intermix instructions with the saves of the caller saved registers. In
3445 some cases, it might be necessary to emit a barrier instruction as the last
3446 insn to prevent such scheduling.
3448 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3449 so that the debug info generation code can handle them properly.
3451 The register save area is laid out like so:
3452 cfa+16
3453 [ varargs spill area ]
3454 [ fr register spill area ]
3455 [ br register spill area ]
3456 [ ar register spill area ]
3457 [ pr register spill area ]
3458 [ gr register spill area ] */
3460 /* ??? Get inefficient code when the frame size is larger than can fit in an
3461 adds instruction. */
3463 void
3464 ia64_expand_prologue (void)
3466 rtx_insn *insn;
3467 rtx ar_pfs_save_reg, ar_unat_save_reg;
3468 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3469 rtx reg, alt_reg;
3471 ia64_compute_frame_size (get_frame_size ());
3472 last_scratch_gr_reg = 15;
3474 if (flag_stack_usage_info)
3475 current_function_static_stack_size = current_frame_info.total_size;
3477 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
3479 HOST_WIDE_INT size = current_frame_info.total_size;
3480 int bs_size = BACKING_STORE_SIZE (current_frame_info.n_input_regs
3481 + current_frame_info.n_local_regs);
3483 if (crtl->is_leaf && !cfun->calls_alloca)
3485 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
3486 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
3487 size - STACK_CHECK_PROTECT,
3488 bs_size);
3489 else if (size + bs_size > STACK_CHECK_PROTECT)
3490 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, 0, bs_size);
3492 else if (size + bs_size > 0)
3493 ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, size, bs_size);
3496 if (dump_file)
3498 fprintf (dump_file, "ia64 frame related registers "
3499 "recorded in current_frame_info.r[]:\n");
3500 #define PRINTREG(a) if (current_frame_info.r[a]) \
3501 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3502 PRINTREG(reg_fp);
3503 PRINTREG(reg_save_b0);
3504 PRINTREG(reg_save_pr);
3505 PRINTREG(reg_save_ar_pfs);
3506 PRINTREG(reg_save_ar_unat);
3507 PRINTREG(reg_save_ar_lc);
3508 PRINTREG(reg_save_gp);
3509 #undef PRINTREG
3512 /* If there is no epilogue, then we don't need some prologue insns.
3513 We need to avoid emitting the dead prologue insns, because flow
3514 will complain about them. */
3515 if (optimize)
3517 edge e;
3518 edge_iterator ei;
3520 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
3521 if ((e->flags & EDGE_FAKE) == 0
3522 && (e->flags & EDGE_FALLTHRU) != 0)
3523 break;
3524 epilogue_p = (e != NULL);
3526 else
3527 epilogue_p = 1;
3529 /* Set the local, input, and output register names. We need to do this
3530 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3531 half. If we use in/loc/out register names, then we get assembler errors
3532 in crtn.S because there is no alloc insn or regstk directive in there. */
3533 if (! TARGET_REG_NAMES)
3535 int inputs = current_frame_info.n_input_regs;
3536 int locals = current_frame_info.n_local_regs;
3537 int outputs = current_frame_info.n_output_regs;
3539 for (i = 0; i < inputs; i++)
3540 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3541 for (i = 0; i < locals; i++)
3542 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3543 for (i = 0; i < outputs; i++)
3544 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3547 /* Set the frame pointer register name. The regnum is logically loc79,
3548 but of course we'll not have allocated that many locals. Rather than
3549 worrying about renumbering the existing rtxs, we adjust the name. */
3550 /* ??? This code means that we can never use one local register when
3551 there is a frame pointer. loc79 gets wasted in this case, as it is
3552 renamed to a register that will never be used. See also the try_locals
3553 code in find_gr_spill. */
3554 if (current_frame_info.r[reg_fp])
3556 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3557 reg_names[HARD_FRAME_POINTER_REGNUM]
3558 = reg_names[current_frame_info.r[reg_fp]];
3559 reg_names[current_frame_info.r[reg_fp]] = tmp;
3562 /* We don't need an alloc instruction if we've used no outputs or locals. */
3563 if (current_frame_info.n_local_regs == 0
3564 && current_frame_info.n_output_regs == 0
3565 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3566 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3568 /* If there is no alloc, but there are input registers used, then we
3569 need a .regstk directive. */
3570 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3571 ar_pfs_save_reg = NULL_RTX;
3573 else
3575 current_frame_info.need_regstk = 0;
3577 if (current_frame_info.r[reg_save_ar_pfs])
3579 regno = current_frame_info.r[reg_save_ar_pfs];
3580 reg_emitted (reg_save_ar_pfs);
3582 else
3583 regno = next_scratch_gr_reg ();
3584 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3586 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3587 GEN_INT (current_frame_info.n_input_regs),
3588 GEN_INT (current_frame_info.n_local_regs),
3589 GEN_INT (current_frame_info.n_output_regs),
3590 GEN_INT (current_frame_info.n_rotate_regs)));
3591 if (current_frame_info.r[reg_save_ar_pfs])
3593 RTX_FRAME_RELATED_P (insn) = 1;
3594 add_reg_note (insn, REG_CFA_REGISTER,
3595 gen_rtx_SET (VOIDmode,
3596 ar_pfs_save_reg,
3597 gen_rtx_REG (DImode, AR_PFS_REGNUM)));
3601 /* Set up frame pointer, stack pointer, and spill iterators. */
3603 n_varargs = cfun->machine->n_varargs;
3604 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3605 stack_pointer_rtx, 0);
3607 if (frame_pointer_needed)
3609 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3610 RTX_FRAME_RELATED_P (insn) = 1;
3612 /* Force the unwind info to recognize this as defining a new CFA,
3613 rather than some temp register setup. */
3614 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3617 if (current_frame_info.total_size != 0)
3619 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3620 rtx offset;
3622 if (satisfies_constraint_I (frame_size_rtx))
3623 offset = frame_size_rtx;
3624 else
3626 regno = next_scratch_gr_reg ();
3627 offset = gen_rtx_REG (DImode, regno);
3628 emit_move_insn (offset, frame_size_rtx);
3631 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3632 stack_pointer_rtx, offset));
3634 if (! frame_pointer_needed)
3636 RTX_FRAME_RELATED_P (insn) = 1;
3637 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3638 gen_rtx_SET (VOIDmode,
3639 stack_pointer_rtx,
3640 gen_rtx_PLUS (DImode,
3641 stack_pointer_rtx,
3642 frame_size_rtx)));
3645 /* ??? At this point we must generate a magic insn that appears to
3646 modify the stack pointer, the frame pointer, and all spill
3647 iterators. This would allow the most scheduling freedom. For
3648 now, just hard stop. */
3649 emit_insn (gen_blockage ());
3652 /* Must copy out ar.unat before doing any integer spills. */
3653 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3655 if (current_frame_info.r[reg_save_ar_unat])
3657 ar_unat_save_reg
3658 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3659 reg_emitted (reg_save_ar_unat);
3661 else
3663 alt_regno = next_scratch_gr_reg ();
3664 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3665 current_frame_info.gr_used_mask |= 1 << alt_regno;
3668 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3669 insn = emit_move_insn (ar_unat_save_reg, reg);
3670 if (current_frame_info.r[reg_save_ar_unat])
3672 RTX_FRAME_RELATED_P (insn) = 1;
3673 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3676 /* Even if we're not going to generate an epilogue, we still
3677 need to save the register so that EH works. */
3678 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3679 emit_insn (gen_prologue_use (ar_unat_save_reg));
3681 else
3682 ar_unat_save_reg = NULL_RTX;
3684 /* Spill all varargs registers. Do this before spilling any GR registers,
3685 since we want the UNAT bits for the GR registers to override the UNAT
3686 bits from varargs, which we don't care about. */
3688 cfa_off = -16;
3689 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3691 reg = gen_rtx_REG (DImode, regno);
3692 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3695 /* Locate the bottom of the register save area. */
3696 cfa_off = (current_frame_info.spill_cfa_off
3697 + current_frame_info.spill_size
3698 + current_frame_info.extra_spill_size);
3700 /* Save the predicate register block either in a register or in memory. */
3701 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3703 reg = gen_rtx_REG (DImode, PR_REG (0));
3704 if (current_frame_info.r[reg_save_pr] != 0)
3706 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3707 reg_emitted (reg_save_pr);
3708 insn = emit_move_insn (alt_reg, reg);
3710 /* ??? Denote pr spill/fill by a DImode move that modifies all
3711 64 hard registers. */
3712 RTX_FRAME_RELATED_P (insn) = 1;
3713 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3715 /* Even if we're not going to generate an epilogue, we still
3716 need to save the register so that EH works. */
3717 if (! epilogue_p)
3718 emit_insn (gen_prologue_use (alt_reg));
3720 else
3722 alt_regno = next_scratch_gr_reg ();
3723 alt_reg = gen_rtx_REG (DImode, alt_regno);
3724 insn = emit_move_insn (alt_reg, reg);
3725 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3726 cfa_off -= 8;
3730 /* Handle AR regs in numerical order. All of them get special handling. */
3731 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3732 && current_frame_info.r[reg_save_ar_unat] == 0)
3734 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3735 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3736 cfa_off -= 8;
3739 /* The alloc insn already copied ar.pfs into a general register. The
3740 only thing we have to do now is copy that register to a stack slot
3741 if we'd not allocated a local register for the job. */
3742 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3743 && current_frame_info.r[reg_save_ar_pfs] == 0)
3745 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3746 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3747 cfa_off -= 8;
3750 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3752 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3753 if (current_frame_info.r[reg_save_ar_lc] != 0)
3755 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3756 reg_emitted (reg_save_ar_lc);
3757 insn = emit_move_insn (alt_reg, reg);
3758 RTX_FRAME_RELATED_P (insn) = 1;
3759 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3761 /* Even if we're not going to generate an epilogue, we still
3762 need to save the register so that EH works. */
3763 if (! epilogue_p)
3764 emit_insn (gen_prologue_use (alt_reg));
3766 else
3768 alt_regno = next_scratch_gr_reg ();
3769 alt_reg = gen_rtx_REG (DImode, alt_regno);
3770 emit_move_insn (alt_reg, reg);
3771 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3772 cfa_off -= 8;
3776 /* Save the return pointer. */
3777 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3779 reg = gen_rtx_REG (DImode, BR_REG (0));
3780 if (current_frame_info.r[reg_save_b0] != 0)
3782 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3783 reg_emitted (reg_save_b0);
3784 insn = emit_move_insn (alt_reg, reg);
3785 RTX_FRAME_RELATED_P (insn) = 1;
3786 add_reg_note (insn, REG_CFA_REGISTER,
3787 gen_rtx_SET (VOIDmode, alt_reg, pc_rtx));
3789 /* Even if we're not going to generate an epilogue, we still
3790 need to save the register so that EH works. */
3791 if (! epilogue_p)
3792 emit_insn (gen_prologue_use (alt_reg));
3794 else
3796 alt_regno = next_scratch_gr_reg ();
3797 alt_reg = gen_rtx_REG (DImode, alt_regno);
3798 emit_move_insn (alt_reg, reg);
3799 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3800 cfa_off -= 8;
3804 if (current_frame_info.r[reg_save_gp])
3806 reg_emitted (reg_save_gp);
3807 insn = emit_move_insn (gen_rtx_REG (DImode,
3808 current_frame_info.r[reg_save_gp]),
3809 pic_offset_table_rtx);
3812 /* We should now be at the base of the gr/br/fr spill area. */
3813 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3814 + current_frame_info.spill_size));
3816 /* Spill all general registers. */
3817 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3818 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3820 reg = gen_rtx_REG (DImode, regno);
3821 do_spill (gen_gr_spill, reg, cfa_off, reg);
3822 cfa_off -= 8;
3825 /* Spill the rest of the BR registers. */
3826 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3827 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3829 alt_regno = next_scratch_gr_reg ();
3830 alt_reg = gen_rtx_REG (DImode, alt_regno);
3831 reg = gen_rtx_REG (DImode, regno);
3832 emit_move_insn (alt_reg, reg);
3833 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3834 cfa_off -= 8;
3837 /* Align the frame and spill all FR registers. */
3838 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3839 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3841 gcc_assert (!(cfa_off & 15));
3842 reg = gen_rtx_REG (XFmode, regno);
3843 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3844 cfa_off -= 16;
3847 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3849 finish_spill_pointers ();
3852 /* Output the textual info surrounding the prologue. */
3854 void
3855 ia64_start_function (FILE *file, const char *fnname,
3856 tree decl ATTRIBUTE_UNUSED)
3858 #if TARGET_ABI_OPEN_VMS
3859 vms_start_function (fnname);
3860 #endif
3862 fputs ("\t.proc ", file);
3863 assemble_name (file, fnname);
3864 fputc ('\n', file);
3865 ASM_OUTPUT_LABEL (file, fnname);
3868 /* Called after register allocation to add any instructions needed for the
3869 epilogue. Using an epilogue insn is favored compared to putting all of the
3870 instructions in output_function_epilogue(), since it allows the scheduler
3871 to intermix instructions with the restores of the call-saved registers. In
3872 some cases, it might be necessary to emit a barrier instruction as the last
3873 insn to prevent such scheduling. */
3875 void
3876 ia64_expand_epilogue (int sibcall_p)
3878 rtx_insn *insn;
3879 rtx reg, alt_reg, ar_unat_save_reg;
3880 int regno, alt_regno, cfa_off;
3882 ia64_compute_frame_size (get_frame_size ());
3884 /* If there is a frame pointer, then we use it instead of the stack
3885 pointer, so that the stack pointer does not need to be valid when
3886 the epilogue starts. See EXIT_IGNORE_STACK. */
3887 if (frame_pointer_needed)
3888 setup_spill_pointers (current_frame_info.n_spilled,
3889 hard_frame_pointer_rtx, 0);
3890 else
3891 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3892 current_frame_info.total_size);
3894 if (current_frame_info.total_size != 0)
3896 /* ??? At this point we must generate a magic insn that appears to
3897 modify the spill iterators and the frame pointer. This would
3898 allow the most scheduling freedom. For now, just hard stop. */
3899 emit_insn (gen_blockage ());
3902 /* Locate the bottom of the register save area. */
3903 cfa_off = (current_frame_info.spill_cfa_off
3904 + current_frame_info.spill_size
3905 + current_frame_info.extra_spill_size);
3907 /* Restore the predicate registers. */
3908 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3910 if (current_frame_info.r[reg_save_pr] != 0)
3912 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3913 reg_emitted (reg_save_pr);
3915 else
3917 alt_regno = next_scratch_gr_reg ();
3918 alt_reg = gen_rtx_REG (DImode, alt_regno);
3919 do_restore (gen_movdi_x, alt_reg, cfa_off);
3920 cfa_off -= 8;
3922 reg = gen_rtx_REG (DImode, PR_REG (0));
3923 emit_move_insn (reg, alt_reg);
3926 /* Restore the application registers. */
3928 /* Load the saved unat from the stack, but do not restore it until
3929 after the GRs have been restored. */
3930 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3932 if (current_frame_info.r[reg_save_ar_unat] != 0)
3934 ar_unat_save_reg
3935 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3936 reg_emitted (reg_save_ar_unat);
3938 else
3940 alt_regno = next_scratch_gr_reg ();
3941 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3942 current_frame_info.gr_used_mask |= 1 << alt_regno;
3943 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3944 cfa_off -= 8;
3947 else
3948 ar_unat_save_reg = NULL_RTX;
3950 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3952 reg_emitted (reg_save_ar_pfs);
3953 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3954 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3955 emit_move_insn (reg, alt_reg);
3957 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3959 alt_regno = next_scratch_gr_reg ();
3960 alt_reg = gen_rtx_REG (DImode, alt_regno);
3961 do_restore (gen_movdi_x, alt_reg, cfa_off);
3962 cfa_off -= 8;
3963 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3964 emit_move_insn (reg, alt_reg);
3967 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3969 if (current_frame_info.r[reg_save_ar_lc] != 0)
3971 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3972 reg_emitted (reg_save_ar_lc);
3974 else
3976 alt_regno = next_scratch_gr_reg ();
3977 alt_reg = gen_rtx_REG (DImode, alt_regno);
3978 do_restore (gen_movdi_x, alt_reg, cfa_off);
3979 cfa_off -= 8;
3981 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3982 emit_move_insn (reg, alt_reg);
3985 /* Restore the return pointer. */
3986 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3988 if (current_frame_info.r[reg_save_b0] != 0)
3990 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3991 reg_emitted (reg_save_b0);
3993 else
3995 alt_regno = next_scratch_gr_reg ();
3996 alt_reg = gen_rtx_REG (DImode, alt_regno);
3997 do_restore (gen_movdi_x, alt_reg, cfa_off);
3998 cfa_off -= 8;
4000 reg = gen_rtx_REG (DImode, BR_REG (0));
4001 emit_move_insn (reg, alt_reg);
4004 /* We should now be at the base of the gr/br/fr spill area. */
4005 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
4006 + current_frame_info.spill_size));
4008 /* The GP may be stored on the stack in the prologue, but it's
4009 never restored in the epilogue. Skip the stack slot. */
4010 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
4011 cfa_off -= 8;
4013 /* Restore all general registers. */
4014 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
4015 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4017 reg = gen_rtx_REG (DImode, regno);
4018 do_restore (gen_gr_restore, reg, cfa_off);
4019 cfa_off -= 8;
4022 /* Restore the branch registers. */
4023 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
4024 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4026 alt_regno = next_scratch_gr_reg ();
4027 alt_reg = gen_rtx_REG (DImode, alt_regno);
4028 do_restore (gen_movdi_x, alt_reg, cfa_off);
4029 cfa_off -= 8;
4030 reg = gen_rtx_REG (DImode, regno);
4031 emit_move_insn (reg, alt_reg);
4034 /* Restore floating point registers. */
4035 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
4036 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4038 gcc_assert (!(cfa_off & 15));
4039 reg = gen_rtx_REG (XFmode, regno);
4040 do_restore (gen_fr_restore_x, reg, cfa_off);
4041 cfa_off -= 16;
4044 /* Restore ar.unat for real. */
4045 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
4047 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
4048 emit_move_insn (reg, ar_unat_save_reg);
4051 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
4053 finish_spill_pointers ();
4055 if (current_frame_info.total_size
4056 || cfun->machine->ia64_eh_epilogue_sp
4057 || frame_pointer_needed)
4059 /* ??? At this point we must generate a magic insn that appears to
4060 modify the spill iterators, the stack pointer, and the frame
4061 pointer. This would allow the most scheduling freedom. For now,
4062 just hard stop. */
4063 emit_insn (gen_blockage ());
4066 if (cfun->machine->ia64_eh_epilogue_sp)
4067 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
4068 else if (frame_pointer_needed)
4070 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
4071 RTX_FRAME_RELATED_P (insn) = 1;
4072 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
4074 else if (current_frame_info.total_size)
4076 rtx offset, frame_size_rtx;
4078 frame_size_rtx = GEN_INT (current_frame_info.total_size);
4079 if (satisfies_constraint_I (frame_size_rtx))
4080 offset = frame_size_rtx;
4081 else
4083 regno = next_scratch_gr_reg ();
4084 offset = gen_rtx_REG (DImode, regno);
4085 emit_move_insn (offset, frame_size_rtx);
4088 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
4089 offset));
4091 RTX_FRAME_RELATED_P (insn) = 1;
4092 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4093 gen_rtx_SET (VOIDmode,
4094 stack_pointer_rtx,
4095 gen_rtx_PLUS (DImode,
4096 stack_pointer_rtx,
4097 frame_size_rtx)));
4100 if (cfun->machine->ia64_eh_epilogue_bsp)
4101 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
4103 if (! sibcall_p)
4104 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
4105 else
4107 int fp = GR_REG (2);
4108 /* We need a throw-away register here; r0 and r1 are reserved,
4109 so r2 is the first available call-clobbered register. If
4110 there was a frame_pointer register, we may have swapped the
4111 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
4112 sure we're using the string "r2" when emitting the register
4113 name for the assembler. */
4114 if (current_frame_info.r[reg_fp]
4115 && current_frame_info.r[reg_fp] == GR_REG (2))
4116 fp = HARD_FRAME_POINTER_REGNUM;
4118 /* We must emit an alloc to force the input registers to become output
4119 registers. Otherwise, if the callee tries to pass its parameters
4120 through to another call without an intervening alloc, then these
4121 values get lost. */
4122 /* ??? We don't need to preserve all input registers. We only need to
4123 preserve those input registers used as arguments to the sibling call.
4124 It is unclear how to compute that number here. */
4125 if (current_frame_info.n_input_regs != 0)
4127 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
4129 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
4130 const0_rtx, const0_rtx,
4131 n_inputs, const0_rtx));
4132 RTX_FRAME_RELATED_P (insn) = 1;
4134 /* ??? We need to mark the alloc as frame-related so that it gets
4135 passed into ia64_asm_unwind_emit for ia64-specific unwinding.
4136 But there's nothing dwarf2 related to be done wrt the register
4137 windows. If we do nothing, dwarf2out will abort on the UNSPEC;
4138 the empty parallel means dwarf2out will not see anything. */
4139 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4140 gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
4145 /* Return 1 if br.ret can do all the work required to return from a
4146 function. */
4149 ia64_direct_return (void)
4151 if (reload_completed && ! frame_pointer_needed)
4153 ia64_compute_frame_size (get_frame_size ());
4155 return (current_frame_info.total_size == 0
4156 && current_frame_info.n_spilled == 0
4157 && current_frame_info.r[reg_save_b0] == 0
4158 && current_frame_info.r[reg_save_pr] == 0
4159 && current_frame_info.r[reg_save_ar_pfs] == 0
4160 && current_frame_info.r[reg_save_ar_unat] == 0
4161 && current_frame_info.r[reg_save_ar_lc] == 0);
4163 return 0;
4166 /* Return the magic cookie that we use to hold the return address
4167 during early compilation. */
4170 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
4172 if (count != 0)
4173 return NULL;
4174 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
4177 /* Split this value after reload, now that we know where the return
4178 address is saved. */
4180 void
4181 ia64_split_return_addr_rtx (rtx dest)
4183 rtx src;
4185 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
4187 if (current_frame_info.r[reg_save_b0] != 0)
4189 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
4190 reg_emitted (reg_save_b0);
4192 else
4194 HOST_WIDE_INT off;
4195 unsigned int regno;
4196 rtx off_r;
4198 /* Compute offset from CFA for BR0. */
4199 /* ??? Must be kept in sync with ia64_expand_prologue. */
4200 off = (current_frame_info.spill_cfa_off
4201 + current_frame_info.spill_size);
4202 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
4203 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
4204 off -= 8;
4206 /* Convert CFA offset to a register based offset. */
4207 if (frame_pointer_needed)
4208 src = hard_frame_pointer_rtx;
4209 else
4211 src = stack_pointer_rtx;
4212 off += current_frame_info.total_size;
4215 /* Load address into scratch register. */
4216 off_r = GEN_INT (off);
4217 if (satisfies_constraint_I (off_r))
4218 emit_insn (gen_adddi3 (dest, src, off_r));
4219 else
4221 emit_move_insn (dest, off_r);
4222 emit_insn (gen_adddi3 (dest, src, dest));
4225 src = gen_rtx_MEM (Pmode, dest);
4228 else
4229 src = gen_rtx_REG (DImode, BR_REG (0));
4231 emit_move_insn (dest, src);
4235 ia64_hard_regno_rename_ok (int from, int to)
4237 /* Don't clobber any of the registers we reserved for the prologue. */
4238 unsigned int r;
4240 for (r = reg_fp; r <= reg_save_ar_lc; r++)
4241 if (to == current_frame_info.r[r]
4242 || from == current_frame_info.r[r]
4243 || to == emitted_frame_related_regs[r]
4244 || from == emitted_frame_related_regs[r])
4245 return 0;
4247 /* Don't use output registers outside the register frame. */
4248 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
4249 return 0;
4251 /* Retain even/oddness on predicate register pairs. */
4252 if (PR_REGNO_P (from) && PR_REGNO_P (to))
4253 return (from & 1) == (to & 1);
4255 return 1;
4258 /* Target hook for assembling integer objects. Handle word-sized
4259 aligned objects and detect the cases when @fptr is needed. */
4261 static bool
4262 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
4264 if (size == POINTER_SIZE / BITS_PER_UNIT
4265 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
4266 && GET_CODE (x) == SYMBOL_REF
4267 && SYMBOL_REF_FUNCTION_P (x))
4269 static const char * const directive[2][2] = {
4270 /* 64-bit pointer */ /* 32-bit pointer */
4271 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
4272 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4274 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4275 output_addr_const (asm_out_file, x);
4276 fputs (")\n", asm_out_file);
4277 return true;
4279 return default_assemble_integer (x, size, aligned_p);
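/* Illustrative only: given the directive table above, an aligned 64-bit
   pointer to a function `foo' (a hypothetical symbol) is emitted as

	data8	@fptr(foo)

   while an unaligned 32-bit pointer would use "data4.ua @fptr(foo)".  */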
4282 /* Emit the function prologue. */
4284 static void
4285 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4287 int mask, grsave, grsave_prev;
4289 if (current_frame_info.need_regstk)
4290 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4291 current_frame_info.n_input_regs,
4292 current_frame_info.n_local_regs,
4293 current_frame_info.n_output_regs,
4294 current_frame_info.n_rotate_regs);
4296 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4297 return;
4299 /* Emit the .prologue directive. */
4301 mask = 0;
4302 grsave = grsave_prev = 0;
4303 if (current_frame_info.r[reg_save_b0] != 0)
4305 mask |= 8;
4306 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4308 if (current_frame_info.r[reg_save_ar_pfs] != 0
4309 && (grsave_prev == 0
4310 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4312 mask |= 4;
4313 if (grsave_prev == 0)
4314 grsave = current_frame_info.r[reg_save_ar_pfs];
4315 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4317 if (current_frame_info.r[reg_fp] != 0
4318 && (grsave_prev == 0
4319 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4321 mask |= 2;
4322 if (grsave_prev == 0)
4323 grsave = HARD_FRAME_POINTER_REGNUM;
4324 grsave_prev = current_frame_info.r[reg_fp];
4326 if (current_frame_info.r[reg_save_pr] != 0
4327 && (grsave_prev == 0
4328 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4330 mask |= 1;
4331 if (grsave_prev == 0)
4332 grsave = current_frame_info.r[reg_save_pr];
4335 if (mask && TARGET_GNU_AS)
4336 fprintf (file, "\t.prologue %d, %d\n", mask,
4337 ia64_dbx_register_number (grsave));
4338 else
4339 fputs ("\t.prologue\n", file);
4341 /* Emit a .spill directive, if necessary, to relocate the base of
4342 the register spill area. */
4343 if (current_frame_info.spill_cfa_off != -16)
4344 fprintf (file, "\t.spill %ld\n",
4345 (long) (current_frame_info.spill_cfa_off
4346 + current_frame_info.spill_size));
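/* A concrete sketch of the directives emitted above, with invented numbers:
   a frame using 2 input, 3 local, 1 output and no rotating registers gets

	.regstk 2, 3, 1, 0

   and the .prologue mask records which save registers were allocated:
   bit 3 = b0, bit 2 = ar.pfs, bit 1 = the frame pointer, bit 0 = the
   predicates, with the second operand naming the first such register.  */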
4349 /* Emit the .body directive at the scheduled end of the prologue. */
4351 static void
4352 ia64_output_function_end_prologue (FILE *file)
4354 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4355 return;
4357 fputs ("\t.body\n", file);
4360 /* Emit the function epilogue. */
4362 static void
4363 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4364 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4366 int i;
4368 if (current_frame_info.r[reg_fp])
4370 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4371 reg_names[HARD_FRAME_POINTER_REGNUM]
4372 = reg_names[current_frame_info.r[reg_fp]];
4373 reg_names[current_frame_info.r[reg_fp]] = tmp;
4374 reg_emitted (reg_fp);
4376 if (! TARGET_REG_NAMES)
4378 for (i = 0; i < current_frame_info.n_input_regs; i++)
4379 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4380 for (i = 0; i < current_frame_info.n_local_regs; i++)
4381 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4382 for (i = 0; i < current_frame_info.n_output_regs; i++)
4383 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4386 current_frame_info.initialized = 0;
4390 ia64_dbx_register_number (int regno)
4392 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4393 from its home at loc79 to something inside the register frame. We
4394 must perform the same renumbering here for the debug info. */
4395 if (current_frame_info.r[reg_fp])
4397 if (regno == HARD_FRAME_POINTER_REGNUM)
4398 regno = current_frame_info.r[reg_fp];
4399 else if (regno == current_frame_info.r[reg_fp])
4400 regno = HARD_FRAME_POINTER_REGNUM;
4403 if (IN_REGNO_P (regno))
4404 return 32 + regno - IN_REG (0);
4405 else if (LOC_REGNO_P (regno))
4406 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4407 else if (OUT_REGNO_P (regno))
4408 return (32 + current_frame_info.n_input_regs
4409 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4410 else
4411 return regno;
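/* Worked example of the renumbering above (illustrative only): with
   n_input_regs == 2 and n_local_regs == 3, IN_REG (0) maps to 32,
   IN_REG (1) to 33, LOC_REG (0) to 32 + 2 = 34, and OUT_REG (0) to
   32 + 2 + 3 = 37.  */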
4414 /* Implement TARGET_TRAMPOLINE_INIT.
4416 The trampoline should set the static chain pointer to the value placed
4417 into the trampoline and should branch to the specified routine.
4418 To make the normal indirect-subroutine calling convention work,
4419 the trampoline must look like a function descriptor; the first
4420 word being the target address and the second being the target's
4421 global pointer.
4423 We abuse the concept of a global pointer by arranging for it
4424 to point to the data we need to load. The complete trampoline
4425 has the following form:
4427          +-------------------+ \
4428   TRAMP: | __ia64_trampoline | |
4429          +-------------------+  > fake function descriptor
4430          | TRAMP+16          | |
4431          +-------------------+ /
4432          | target descriptor |
4433          +-------------------+
4434          | static link       |
4435          +-------------------+
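/* A minimal sketch of the same 32-byte block as a C struct, purely for
   illustration (the struct and its field names are not part of ia64.c;
   the code below fills these four words with emit_move_insn):  */

struct ia64_trampoline_words
{
  unsigned long long helper;        /* word 0: address of __ia64_trampoline  */
  unsigned long long fake_gp;       /* word 1: TRAMP+16, the fake gp value   */
  unsigned long long target_fdesc;  /* word 2: descriptor of the real target */
  unsigned long long static_chain;  /* word 3: the static chain value        */
};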
4438 static void
4439 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4441 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4442 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4444 /* The Intel assembler requires that the global __ia64_trampoline symbol
4445 be declared explicitly */
4446 if (!TARGET_GNU_AS)
4448 static bool declared_ia64_trampoline = false;
4450 if (!declared_ia64_trampoline)
4452 declared_ia64_trampoline = true;
4453 (*targetm.asm_out.globalize_label) (asm_out_file,
4454 "__ia64_trampoline");
4458 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4459 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4460 fnaddr = convert_memory_address (Pmode, fnaddr);
4461 static_chain = convert_memory_address (Pmode, static_chain);
4463 /* Load up our iterator. */
4464 addr_reg = copy_to_reg (addr);
4465 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4467 /* The first two words are the fake descriptor:
4468 __ia64_trampoline, ADDR+16. */
4469 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4470 if (TARGET_ABI_OPEN_VMS)
4472 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4473 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4474 relocation against function symbols to make it identical to the
4475 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4476 strict ELF and dereference to get the bare code address. */
4477 rtx reg = gen_reg_rtx (Pmode);
4478 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4479 emit_move_insn (reg, tramp);
4480 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4481 tramp = reg;
4483 emit_move_insn (m_tramp, tramp);
4484 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4485 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4487 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
4488 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4489 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4491 /* The third word is the target descriptor. */
4492 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4493 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4494 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4496 /* The fourth word is the static chain. */
4497 emit_move_insn (m_tramp, static_chain);
4500 /* Do any needed setup for a variadic function. CUM has not been updated
4501 for the last named argument which has type TYPE and mode MODE.
4503 We generate the actual spill instructions during prologue generation. */
4505 static void
4506 ia64_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
4507 tree type, int * pretend_size,
4508 int second_time ATTRIBUTE_UNUSED)
4510 CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);
4512 /* Skip the current argument. */
4513 ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);
4515 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4517 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4518 *pretend_size = n * UNITS_PER_WORD;
4519 cfun->machine->n_varargs = n;
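/* Worked example (illustrative, assuming MAX_ARGUMENT_SLOTS == 8 and
   8-byte words): if the named arguments occupy 3 slots, then
   n = 8 - 3 = 5, pretend_size = 40 bytes, and the prologue code above
   spills 5 GR argument registers.  */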
4523 /* Check whether TYPE is a homogeneous floating point aggregate. If
4524 it is, return the mode of the floating point type that appears
4525 in all leafs. If it is not, return VOIDmode.
4527 An aggregate is a homogeneous floating point aggregate if all
4528 fields/elements in it have the same floating point type (e.g.,
4529 SFmode). 128-bit quad-precision floats are excluded.
4531 Variable sized aggregates should never arrive here, since we should
4532 have already decided to pass them by reference. Top-level zero-sized
4533 aggregates are excluded because our parallels crash the middle-end. */
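/* Illustrative examples of the classification; these declarations are not
   part of ia64.c:  */

struct hfa_sf      { float x, y, z; };       /* HFA, element mode SFmode      */
struct hfa_nested  { struct hfa_sf a[2]; };  /* still an HFA, via the array   */
struct not_hfa_mix { float x; double y; };   /* mixed float modes -> VOIDmode */
struct not_hfa_int { float x; int tag; };    /* integer leaf -> VOIDmode      */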
4535 static machine_mode
4536 hfa_element_mode (const_tree type, bool nested)
4538 machine_mode element_mode = VOIDmode;
4539 machine_mode mode;
4540 enum tree_code code = TREE_CODE (type);
4541 int know_element_mode = 0;
4542 tree t;
4544 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4545 return VOIDmode;
4547 switch (code)
4549 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4550 case BOOLEAN_TYPE: case POINTER_TYPE:
4551 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4552 case LANG_TYPE: case FUNCTION_TYPE:
4553 return VOIDmode;
4555 /* Fortran complex types are supposed to be HFAs, so we need to handle
4556 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4557 types though. */
4558 case COMPLEX_TYPE:
4559 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4560 && TYPE_MODE (type) != TCmode)
4561 return GET_MODE_INNER (TYPE_MODE (type));
4562 else
4563 return VOIDmode;
4565 case REAL_TYPE:
4566 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4567 mode if this is contained within an aggregate. */
4568 if (nested && TYPE_MODE (type) != TFmode)
4569 return TYPE_MODE (type);
4570 else
4571 return VOIDmode;
4573 case ARRAY_TYPE:
4574 return hfa_element_mode (TREE_TYPE (type), 1);
4576 case RECORD_TYPE:
4577 case UNION_TYPE:
4578 case QUAL_UNION_TYPE:
4579 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4581 if (TREE_CODE (t) != FIELD_DECL)
4582 continue;
4584 mode = hfa_element_mode (TREE_TYPE (t), 1);
4585 if (know_element_mode)
4587 if (mode != element_mode)
4588 return VOIDmode;
4590 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4591 return VOIDmode;
4592 else
4594 know_element_mode = 1;
4595 element_mode = mode;
4598 return element_mode;
4600 default:
4601 /* If we reach here, we probably have some front-end specific type
4602 that the backend doesn't know about. This can happen via the
4603 aggregate_value_p call in init_function_start. All we can do is
4604 ignore unknown tree types. */
4605 return VOIDmode;
4608 return VOIDmode;
4611 /* Return the number of words required to hold a quantity of TYPE and MODE
4612 when passed as an argument. */
4613 static int
4614 ia64_function_arg_words (const_tree type, machine_mode mode)
4616 int words;
4618 if (mode == BLKmode)
4619 words = int_size_in_bytes (type);
4620 else
4621 words = GET_MODE_SIZE (mode);
4623 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
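/* For instance (illustration only), with 8-byte words a 20-byte BLKmode
   aggregate needs (20 + 8 - 1) / 8 == 3 argument words and a 9-byte one
   needs 2, while any 8-byte scalar needs exactly 1.  */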
4626 /* Return the number of registers that should be skipped so the current
4627 argument (described by TYPE and WORDS) will be properly aligned.
4629 Integer and float arguments larger than 8 bytes start at the next
4630 even boundary. Aggregates larger than 8 bytes start at the next
4631 even boundary if the aggregate has 16 byte alignment. Note that
4632 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4633 but are still to be aligned in registers.
4635 ??? The ABI does not specify how to handle aggregates with
4636 alignment from 9 to 15 bytes, or greater than 16. We handle them
4637 all as if they had 16 byte alignment. Such aggregates can occur
4638 only if gcc extensions are used. */
4639 static int
4640 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4641 const_tree type, int words)
4643 /* No registers are skipped on VMS. */
4644 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4645 return 0;
4647 if (type
4648 && TREE_CODE (type) != INTEGER_TYPE
4649 && TREE_CODE (type) != REAL_TYPE)
4650 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4651 else
4652 return words > 1;
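/* Illustrative cases: on a non-VMS target with cum->words odd (say 3),
   a 16-byte-aligned aggregate or a two-word integer/FP scalar returns 1,
   pushing the argument to the next even slot; if cum->words is already
   even, the function returns 0 and no slot is skipped.  */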
4655 /* Return rtx for register where argument is passed, or zero if it is passed
4656 on the stack. */
4657 /* ??? 128-bit quad-precision floats are always passed in general
4658 registers. */
4660 static rtx
4661 ia64_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
4662 const_tree type, bool named, bool incoming)
4664 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4666 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4667 int words = ia64_function_arg_words (type, mode);
4668 int offset = ia64_function_arg_offset (cum, type, words);
4669 machine_mode hfa_mode = VOIDmode;
4671 /* For OPEN VMS, emit the instruction setting up the argument register here,
4672 when we know this will be together with the other arguments setup related
4673 insns. This is not the conceptually best place to do this, but this is
4674 the easiest as we have convenient access to cumulative args info. */
4676 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4677 && named == 1)
4679 unsigned HOST_WIDE_INT regval = cum->words;
4680 int i;
4682 for (i = 0; i < 8; i++)
4683 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4685 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4686 GEN_INT (regval));
4689 /* If all argument slots are used, then it must go on the stack. */
4690 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4691 return 0;
4693 /* On OpenVMS argument is either in Rn or Fn. */
4694 if (TARGET_ABI_OPEN_VMS)
4696 if (FLOAT_MODE_P (mode))
4697 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4698 else
4699 return gen_rtx_REG (mode, basereg + cum->words);
4702 /* Check for and handle homogeneous FP aggregates. */
4703 if (type)
4704 hfa_mode = hfa_element_mode (type, 0);
4706 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4707 and unprototyped hfas are passed specially. */
4708 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4710 rtx loc[16];
4711 int i = 0;
4712 int fp_regs = cum->fp_regs;
4713 int int_regs = cum->words + offset;
4714 int hfa_size = GET_MODE_SIZE (hfa_mode);
4715 int byte_size;
4716 int args_byte_size;
4718 /* If prototyped, pass it in FR regs then GR regs.
4719 If not prototyped, pass it in both FR and GR regs.
4721 If this is an SFmode aggregate, then it is possible to run out of
4722 FR regs while GR regs are still left. In that case, we pass the
4723 remaining part in the GR regs. */
4725 /* Fill the FP regs. We do this always. We stop if we reach the end
4726 of the argument, the last FP register, or the last argument slot. */
4728 byte_size = ((mode == BLKmode)
4729 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4730 args_byte_size = int_regs * UNITS_PER_WORD;
4731 offset = 0;
4732 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4733 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4735 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4736 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4737 + fp_regs)),
4738 GEN_INT (offset));
4739 offset += hfa_size;
4740 args_byte_size += hfa_size;
4741 fp_regs++;
4744 /* If no prototype, then the whole thing must go in GR regs. */
4745 if (! cum->prototype)
4746 offset = 0;
4747 /* If this is an SFmode aggregate, then we might have some left over
4748 that needs to go in GR regs. */
4749 else if (byte_size != offset)
4750 int_regs += offset / UNITS_PER_WORD;
4752 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4754 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4756 machine_mode gr_mode = DImode;
4757 unsigned int gr_size;
4759 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4760 then this goes in a GR reg left adjusted/little endian, right
4761 adjusted/big endian. */
4762 /* ??? Currently this is handled wrong, because 4-byte hunks are
4763 always right adjusted/little endian. */
4764 if (offset & 0x4)
4765 gr_mode = SImode;
4766 /* If we have an even 4 byte hunk because the aggregate is a
4767 multiple of 4 bytes in size, then this goes in a GR reg right
4768 adjusted/little endian. */
4769 else if (byte_size - offset == 4)
4770 gr_mode = SImode;
4772 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4773 gen_rtx_REG (gr_mode, (basereg
4774 + int_regs)),
4775 GEN_INT (offset));
4777 gr_size = GET_MODE_SIZE (gr_mode);
4778 offset += gr_size;
4779 if (gr_size == UNITS_PER_WORD
4780 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4781 int_regs++;
4782 else if (gr_size > UNITS_PER_WORD)
4783 int_regs += gr_size / UNITS_PER_WORD;
4785 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4788 /* Integral and aggregates go in general registers. If we have run out of
4789 FR registers, then FP values must also go in general registers. This can
4790 happen when we have a SFmode HFA. */
4791 else if (mode == TFmode || mode == TCmode
4792 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4794 int byte_size = ((mode == BLKmode)
4795 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4796 if (BYTES_BIG_ENDIAN
4797 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4798 && byte_size < UNITS_PER_WORD
4799 && byte_size > 0)
4801 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4802 gen_rtx_REG (DImode,
4803 (basereg + cum->words
4804 + offset)),
4805 const0_rtx);
4806 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4808 else
4809 return gen_rtx_REG (mode, basereg + cum->words + offset);
4813 /* If there is a prototype, then FP values go in a FR register when
4814 named, and in a GR register when unnamed. */
4815 else if (cum->prototype)
4817 if (named)
4818 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4819 /* In big-endian mode, an anonymous SFmode value must be represented
4820 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4821 the value into the high half of the general register. */
4822 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4823 return gen_rtx_PARALLEL (mode,
4824 gen_rtvec (1,
4825 gen_rtx_EXPR_LIST (VOIDmode,
4826 gen_rtx_REG (DImode, basereg + cum->words + offset),
4827 const0_rtx)));
4828 else
4829 return gen_rtx_REG (mode, basereg + cum->words + offset);
4831 /* If there is no prototype, then FP values go in both FR and GR
4832 registers. */
4833 else
4835 /* See comment above. */
4836 machine_mode inner_mode =
4837 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4839 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4840 gen_rtx_REG (mode, (FR_ARG_FIRST
4841 + cum->fp_regs)),
4842 const0_rtx);
4843 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4844 gen_rtx_REG (inner_mode,
4845 (basereg + cum->words
4846 + offset)),
4847 const0_rtx);
4849 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4853 /* Implement TARGET_FUNCTION_ARG target hook. */
4855 static rtx
4856 ia64_function_arg (cumulative_args_t cum, machine_mode mode,
4857 const_tree type, bool named)
4859 return ia64_function_arg_1 (cum, mode, type, named, false);
4862 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4864 static rtx
4865 ia64_function_incoming_arg (cumulative_args_t cum,
4866 machine_mode mode,
4867 const_tree type, bool named)
4869 return ia64_function_arg_1 (cum, mode, type, named, true);
4872 /* Return number of bytes, at the beginning of the argument, that must be
4873 put in registers. 0 if the argument is entirely in registers or entirely
4874 in memory. */
4876 static int
4877 ia64_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
4878 tree type, bool named ATTRIBUTE_UNUSED)
4880 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4882 int words = ia64_function_arg_words (type, mode);
4883 int offset = ia64_function_arg_offset (cum, type, words);
4885 /* If all argument slots are used, then it must go on the stack. */
4886 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4887 return 0;
4889 /* It doesn't matter whether the argument goes in FR or GR regs. If
4890 it fits within the 8 argument slots, then it goes entirely in
4891 registers. If it extends past the last argument slot, then the rest
4892 goes on the stack. */
4894 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4895 return 0;
4897 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
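/* Worked example (illustrative, assuming MAX_ARGUMENT_SLOTS == 8 and
   8-byte words): with cum->words == 6, offset == 0 and a 4-word argument,
   (8 - 6 - 0) * 8 == 16 bytes are passed in registers and the remaining
   16 bytes go on the stack.  */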
4900 /* Return ivms_arg_type based on machine_mode. */
4902 static enum ivms_arg_type
4903 ia64_arg_type (machine_mode mode)
4905 switch (mode)
4907 case SFmode:
4908 return FS;
4909 case DFmode:
4910 return FT;
4911 default:
4912 return I64;
4916 /* Update CUM to point after this argument. This is patterned after
4917 ia64_function_arg. */
4919 static void
4920 ia64_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
4921 const_tree type, bool named)
4923 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4924 int words = ia64_function_arg_words (type, mode);
4925 int offset = ia64_function_arg_offset (cum, type, words);
4926 machine_mode hfa_mode = VOIDmode;
4928 /* If all arg slots are already full, then there is nothing to do. */
4929 if (cum->words >= MAX_ARGUMENT_SLOTS)
4931 cum->words += words + offset;
4932 return;
4935 cum->atypes[cum->words] = ia64_arg_type (mode);
4936 cum->words += words + offset;
4938 /* On OpenVMS argument is either in Rn or Fn. */
4939 if (TARGET_ABI_OPEN_VMS)
4941 cum->int_regs = cum->words;
4942 cum->fp_regs = cum->words;
4943 return;
4946 /* Check for and handle homogeneous FP aggregates. */
4947 if (type)
4948 hfa_mode = hfa_element_mode (type, 0);
4950 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4951 and unprototyped hfas are passed specially. */
4952 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4954 int fp_regs = cum->fp_regs;
4955 /* This is the original value of cum->words + offset. */
4956 int int_regs = cum->words - words;
4957 int hfa_size = GET_MODE_SIZE (hfa_mode);
4958 int byte_size;
4959 int args_byte_size;
4961 /* If prototyped, pass it in FR regs then GR regs.
4962 If not prototyped, pass it in both FR and GR regs.
4964 If this is an SFmode aggregate, then it is possible to run out of
4965 FR regs while GR regs are still left. In that case, we pass the
4966 remaining part in the GR regs. */
4968 /* Fill the FP regs. We do this always. We stop if we reach the end
4969 of the argument, the last FP register, or the last argument slot. */
4971 byte_size = ((mode == BLKmode)
4972 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4973 args_byte_size = int_regs * UNITS_PER_WORD;
4974 offset = 0;
4975 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4976 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4978 offset += hfa_size;
4979 args_byte_size += hfa_size;
4980 fp_regs++;
4983 cum->fp_regs = fp_regs;
4986 /* Integral and aggregates go in general registers. So do TFmode FP values.
4987 If we have run out of FR registers, then other FP values must also go in
4988 general registers. This can happen when we have a SFmode HFA. */
4989 else if (mode == TFmode || mode == TCmode
4990 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4991 cum->int_regs = cum->words;
4993 /* If there is a prototype, then FP values go in a FR register when
4994 named, and in a GR register when unnamed. */
4995 else if (cum->prototype)
4997 if (! named)
4998 cum->int_regs = cum->words;
4999 else
5000 /* ??? Complex types should not reach here. */
5001 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
5003 /* If there is no prototype, then FP values go in both FR and GR
5004 registers. */
5005 else
5007 /* ??? Complex types should not reach here. */
5008 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
5009 cum->int_regs = cum->words;
5013 /* Arguments with alignment larger than 8 bytes start at the next even
5014 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
5015 even though their normal alignment is 8 bytes. See ia64_function_arg. */
5017 static unsigned int
5018 ia64_function_arg_boundary (machine_mode mode, const_tree type)
5020 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
5021 return PARM_BOUNDARY * 2;
5023 if (type)
5025 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
5026 return PARM_BOUNDARY * 2;
5027 else
5028 return PARM_BOUNDARY;
5031 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
5032 return PARM_BOUNDARY * 2;
5033 else
5034 return PARM_BOUNDARY;
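/* E.g. (illustration, assuming the usual 64-bit PARM_BOUNDARY): a
   16-byte-aligned aggregate, or TFmode under the ILP32 HP-UX ABI, gets
   PARM_BOUNDARY * 2 == 128-bit alignment, while an ordinary 8-byte
   scalar keeps the 64-bit default.  */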
5037 /* True if it is OK to do sibling call optimization for the specified
5038 call expression EXP. DECL will be the called function, or NULL if
5039 this is an indirect call. */
5040 static bool
5041 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5043 /* We can't perform a sibcall if the current function has the syscall_linkage
5044 attribute. */
5045 if (lookup_attribute ("syscall_linkage",
5046 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
5047 return false;
5049 /* We must always return with our current GP. This means we can
5050 only sibcall to functions defined in the current module unless
5051 TARGET_CONST_GP is set to true. */
5052 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
5056 /* Implement va_arg. */
5058 static tree
5059 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5060 gimple_seq *post_p)
5062 /* Variable sized types are passed by reference. */
5063 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5065 tree ptrtype = build_pointer_type (type);
5066 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
5067 return build_va_arg_indirect_ref (addr);
5070 /* Aggregate arguments with alignment larger than 8 bytes start at
5071 the next even boundary. Integer and floating point arguments
5072 do so if they are larger than 8 bytes, whether or not they are
5073 also aligned larger than 8 bytes. */
5074 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
5075 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
5077 tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
5078 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5079 build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
5080 gimplify_assign (unshare_expr (valist), t, pre_p);
5083 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5086 /* Return 1 if the function return value is returned in memory. Return 0 if it is
5087 in a register. */
5089 static bool
5090 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
5092 machine_mode mode;
5093 machine_mode hfa_mode;
5094 HOST_WIDE_INT byte_size;
5096 mode = TYPE_MODE (valtype);
5097 byte_size = GET_MODE_SIZE (mode);
5098 if (mode == BLKmode)
5100 byte_size = int_size_in_bytes (valtype);
5101 if (byte_size < 0)
5102 return true;
5105 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
5107 hfa_mode = hfa_element_mode (valtype, 0);
5108 if (hfa_mode != VOIDmode)
5110 int hfa_size = GET_MODE_SIZE (hfa_mode);
5112 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
5113 return true;
5114 else
5115 return false;
5117 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
5118 return true;
5119 else
5120 return false;
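/* Illustrative outcomes: an HFA of four doubles (4 elements) is returned
   in FP registers; an HFA of ten doubles exceeds the 8-element limit and
   is returned in memory; a 40-byte non-HFA aggregate exceeds
   UNITS_PER_WORD * MAX_INT_RETURN_SLOTS (32 bytes, assuming the usual 4
   integer return slots) and is also returned in memory.  */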
5123 /* Return rtx for register that holds the function return value. */
5125 static rtx
5126 ia64_function_value (const_tree valtype,
5127 const_tree fn_decl_or_type,
5128 bool outgoing ATTRIBUTE_UNUSED)
5130 machine_mode mode;
5131 machine_mode hfa_mode;
5132 int unsignedp;
5133 const_tree func = fn_decl_or_type;
5135 if (fn_decl_or_type
5136 && !DECL_P (fn_decl_or_type))
5137 func = NULL;
5139 mode = TYPE_MODE (valtype);
5140 hfa_mode = hfa_element_mode (valtype, 0);
5142 if (hfa_mode != VOIDmode)
5144 rtx loc[8];
5145 int i;
5146 int hfa_size;
5147 int byte_size;
5148 int offset;
5150 hfa_size = GET_MODE_SIZE (hfa_mode);
5151 byte_size = ((mode == BLKmode)
5152 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
5153 offset = 0;
5154 for (i = 0; offset < byte_size; i++)
5156 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5157 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
5158 GEN_INT (offset));
5159 offset += hfa_size;
5161 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5163 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
5164 return gen_rtx_REG (mode, FR_ARG_FIRST);
5165 else
5167 bool need_parallel = false;
5169 /* In big-endian mode, we need to manage the layout of aggregates
5170 in the registers so that we get the bits properly aligned in
5171 the highpart of the registers. */
5172 if (BYTES_BIG_ENDIAN
5173 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
5174 need_parallel = true;
5176 /* Something like struct S { long double x; char a[0] } is not an
5177 HFA structure, and therefore doesn't go in fp registers. But
5178 the middle-end will give it XFmode anyway, and XFmode values
5179 don't normally fit in integer registers. So we need to smuggle
5180 the value inside a parallel. */
5181 else if (mode == XFmode || mode == XCmode || mode == RFmode)
5182 need_parallel = true;
5184 if (need_parallel)
5186 rtx loc[8];
5187 int offset;
5188 int bytesize;
5189 int i;
5191 offset = 0;
5192 bytesize = int_size_in_bytes (valtype);
5193 /* An empty PARALLEL is invalid here, but the return value
5194 doesn't matter for empty structs. */
5195 if (bytesize == 0)
5196 return gen_rtx_REG (mode, GR_RET_FIRST);
5197 for (i = 0; offset < bytesize; i++)
5199 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
5200 gen_rtx_REG (DImode,
5201 GR_RET_FIRST + i),
5202 GEN_INT (offset));
5203 offset += UNITS_PER_WORD;
5205 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
5208 mode = promote_function_mode (valtype, mode, &unsignedp,
5209 func ? TREE_TYPE (func) : NULL_TREE,
5210 true);
5212 return gen_rtx_REG (mode, GR_RET_FIRST);
5216 /* Worker function for TARGET_LIBCALL_VALUE. */
5218 static rtx
5219 ia64_libcall_value (machine_mode mode,
5220 const_rtx fun ATTRIBUTE_UNUSED)
5222 return gen_rtx_REG (mode,
5223 (((GET_MODE_CLASS (mode) == MODE_FLOAT
5224 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5225 && (mode) != TFmode)
5226 ? FR_RET_FIRST : GR_RET_FIRST));
5229 /* Worker function for FUNCTION_VALUE_REGNO_P. */
5231 static bool
5232 ia64_function_value_regno_p (const unsigned int regno)
5234 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
5235 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
5238 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5239 We need to emit DTP-relative relocations. */
5241 static void
5242 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
5244 gcc_assert (size == 4 || size == 8);
5245 if (size == 4)
5246 fputs ("\tdata4.ua\t@dtprel(", file);
5247 else
5248 fputs ("\tdata8.ua\t@dtprel(", file);
5249 output_addr_const (file, x);
5250 fputs (")", file);
5253 /* Print a memory address as an operand to reference that memory location. */
5255 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
5256 also call this from ia64_print_operand for memory addresses. */
5258 static void
5259 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
5260 rtx address ATTRIBUTE_UNUSED)
5264 /* Print an operand to an assembler instruction.
5265 C Swap and print a comparison operator.
5266 D Print an FP comparison operator.
5267 E Print 32 - constant, for SImode shifts as extract.
5268 e Print 64 - constant, for DImode rotates.
5269 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
5270 a floating point register emitted normally.
5271 G A floating point constant.
5272 I Invert a predicate register by adding 1.
5273 J Select the proper predicate register for a condition.
5274 j Select the inverse predicate register for a condition.
5275 O Append .acq for volatile load.
5276 P Postincrement of a MEM.
5277 Q Append .rel for volatile store.
5278 R Print .s .d or nothing for a single, double or no truncation.
5279 S Shift amount for shladd instruction.
5280 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5281 for Intel assembler.
5282 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5283 for Intel assembler.
5284 X A pair of floating point registers.
5285 r Print register name, or constant 0 as r0. HP compatibility for
5286 Linux kernel.
5287 v Print vector constant value as an 8-byte integer value. */
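/* For example, in a hypothetical output template such as
   "cmp.%C1 p6, p7 = %3, %2", an LT comparison in operand 1 is printed as
   "cmp.gt", because %C swaps the condition before printing its RTX name.  */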
5289 static void
5290 ia64_print_operand (FILE * file, rtx x, int code)
5292 const char *str;
5294 switch (code)
5296 case 0:
5297 /* Handled below. */
5298 break;
5300 case 'C':
5302 enum rtx_code c = swap_condition (GET_CODE (x));
5303 fputs (GET_RTX_NAME (c), file);
5304 return;
5307 case 'D':
5308 switch (GET_CODE (x))
5310 case NE:
5311 str = "neq";
5312 break;
5313 case UNORDERED:
5314 str = "unord";
5315 break;
5316 case ORDERED:
5317 str = "ord";
5318 break;
5319 case UNLT:
5320 str = "nge";
5321 break;
5322 case UNLE:
5323 str = "ngt";
5324 break;
5325 case UNGT:
5326 str = "nle";
5327 break;
5328 case UNGE:
5329 str = "nlt";
5330 break;
5331 case UNEQ:
5332 case LTGT:
5333 gcc_unreachable ();
5334 default:
5335 str = GET_RTX_NAME (GET_CODE (x));
5336 break;
5338 fputs (str, file);
5339 return;
5341 case 'E':
5342 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5343 return;
5345 case 'e':
5346 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5347 return;
5349 case 'F':
5350 if (x == CONST0_RTX (GET_MODE (x)))
5351 str = reg_names [FR_REG (0)];
5352 else if (x == CONST1_RTX (GET_MODE (x)))
5353 str = reg_names [FR_REG (1)];
5354 else
5356 gcc_assert (GET_CODE (x) == REG);
5357 str = reg_names [REGNO (x)];
5359 fputs (str, file);
5360 return;
5362 case 'G':
5364 long val[4];
5365 REAL_VALUE_TYPE rv;
5366 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5367 real_to_target (val, &rv, GET_MODE (x));
5368 if (GET_MODE (x) == SFmode)
5369 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5370 else if (GET_MODE (x) == DFmode)
5371 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5372 & 0xffffffff,
5373 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5374 & 0xffffffff);
5375 else
5376 output_operand_lossage ("invalid %%G mode");
5378 return;
5380 case 'I':
5381 fputs (reg_names [REGNO (x) + 1], file);
5382 return;
5384 case 'J':
5385 case 'j':
5387 unsigned int regno = REGNO (XEXP (x, 0));
5388 if (GET_CODE (x) == EQ)
5389 regno += 1;
5390 if (code == 'j')
5391 regno ^= 1;
5392 fputs (reg_names [regno], file);
5394 return;
5396 case 'O':
5397 if (MEM_VOLATILE_P (x))
5398 fputs(".acq", file);
5399 return;
5401 case 'P':
5403 HOST_WIDE_INT value;
5405 switch (GET_CODE (XEXP (x, 0)))
5407 default:
5408 return;
5410 case POST_MODIFY:
5411 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5412 if (GET_CODE (x) == CONST_INT)
5413 value = INTVAL (x);
5414 else
5416 gcc_assert (GET_CODE (x) == REG);
5417 fprintf (file, ", %s", reg_names[REGNO (x)]);
5418 return;
5420 break;
5422 case POST_INC:
5423 value = GET_MODE_SIZE (GET_MODE (x));
5424 break;
5426 case POST_DEC:
5427 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5428 break;
5431 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5432 return;
5435 case 'Q':
5436 if (MEM_VOLATILE_P (x))
5437 fputs(".rel", file);
5438 return;
5440 case 'R':
5441 if (x == CONST0_RTX (GET_MODE (x)))
5442 fputs(".s", file);
5443 else if (x == CONST1_RTX (GET_MODE (x)))
5444 fputs(".d", file);
5445 else if (x == CONST2_RTX (GET_MODE (x)))
5447 else
5448 output_operand_lossage ("invalid %%R value");
5449 return;
5451 case 'S':
5452 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5453 return;
5455 case 'T':
5456 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5458 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5459 return;
5461 break;
5463 case 'U':
5464 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5466 const char *prefix = "0x";
5467 if (INTVAL (x) & 0x80000000)
5469 fprintf (file, "0xffffffff");
5470 prefix = "";
5472 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5473 return;
5475 break;
5477 case 'X':
5479 unsigned int regno = REGNO (x);
5480 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5482 return;
5484 case 'r':
5485 /* If this operand is the constant zero, write it as register zero.
5486 Any register, zero, or CONST_INT value is OK here. */
5487 if (GET_CODE (x) == REG)
5488 fputs (reg_names[REGNO (x)], file);
5489 else if (x == CONST0_RTX (GET_MODE (x)))
5490 fputs ("r0", file);
5491 else if (GET_CODE (x) == CONST_INT)
5492 output_addr_const (file, x);
5493 else
5494 output_operand_lossage ("invalid %%r value");
5495 return;
5497 case 'v':
5498 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5499 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5500 break;
5502 case '+':
5504 const char *which;
5506 /* For conditional branches, returns or calls, substitute
5507 sptk, dptk, dpnt, or spnt for %s. */
5508 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5509 if (x)
5511 int pred_val = XINT (x, 0);
5513 /* Guess top and bottom 10% statically predicted. */
5514 if (pred_val < REG_BR_PROB_BASE / 50
5515 && br_prob_note_reliable_p (x))
5516 which = ".spnt";
5517 else if (pred_val < REG_BR_PROB_BASE / 2)
5518 which = ".dpnt";
5519 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5520 || !br_prob_note_reliable_p (x))
5521 which = ".dptk";
5522 else
5523 which = ".sptk";
5525 else if (CALL_P (current_output_insn))
5526 which = ".sptk";
5527 else
5528 which = ".dptk";
5530 fputs (which, file);
5531 return;
5534 case ',':
5535 x = current_insn_predicate;
5536 if (x)
5538 unsigned int regno = REGNO (XEXP (x, 0));
5539 if (GET_CODE (x) == EQ)
5540 regno += 1;
5541 fprintf (file, "(%s) ", reg_names [regno]);
5543 return;
5545 default:
5546 output_operand_lossage ("ia64_print_operand: unknown code");
5547 return;
5550 switch (GET_CODE (x))
5552 /* This happens for the spill/restore instructions. */
5553 case POST_INC:
5554 case POST_DEC:
5555 case POST_MODIFY:
5556 x = XEXP (x, 0);
5557 /* ... fall through ... */
5559 case REG:
5560 fputs (reg_names [REGNO (x)], file);
5561 break;
5563 case MEM:
5565 rtx addr = XEXP (x, 0);
5566 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5567 addr = XEXP (addr, 0);
5568 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5569 break;
5572 default:
5573 output_addr_const (file, x);
5574 break;
5577 return;
5580 /* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5582 static bool
5583 ia64_print_operand_punct_valid_p (unsigned char code)
5585 return (code == '+' || code == ',');
5588 /* Compute a (partial) cost for rtx X. Return true if the complete
5589 cost has been computed, and false if subexpressions should be
5590 scanned. In either case, *TOTAL contains the cost result. */
5591 /* ??? This is incomplete. */
5593 static bool
5594 ia64_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
5595 int *total, bool speed ATTRIBUTE_UNUSED)
5597 switch (code)
5599 case CONST_INT:
5600 switch (outer_code)
5602 case SET:
5603 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5604 return true;
5605 case PLUS:
5606 if (satisfies_constraint_I (x))
5607 *total = 0;
5608 else if (satisfies_constraint_J (x))
5609 *total = 1;
5610 else
5611 *total = COSTS_N_INSNS (1);
5612 return true;
5613 default:
5614 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5615 *total = 0;
5616 else
5617 *total = COSTS_N_INSNS (1);
5618 return true;
5621 case CONST_DOUBLE:
5622 *total = COSTS_N_INSNS (1);
5623 return true;
5625 case CONST:
5626 case SYMBOL_REF:
5627 case LABEL_REF:
5628 *total = COSTS_N_INSNS (3);
5629 return true;
5631 case FMA:
5632 *total = COSTS_N_INSNS (4);
5633 return true;
5635 case MULT:
5636 /* For multiplies wider than HImode, we have to go to the FPU,
5637 which normally involves copies. Plus there's the latency
5638 of the multiply itself, and the latency of the instructions to
5639 transfer integer regs to FP regs. */
5640 if (FLOAT_MODE_P (GET_MODE (x)))
5641 *total = COSTS_N_INSNS (4);
5642 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5643 *total = COSTS_N_INSNS (10);
5644 else
5645 *total = COSTS_N_INSNS (2);
5646 return true;
5648 case PLUS:
5649 case MINUS:
5650 if (FLOAT_MODE_P (GET_MODE (x)))
5652 *total = COSTS_N_INSNS (4);
5653 return true;
5655 /* FALLTHRU */
5657 case ASHIFT:
5658 case ASHIFTRT:
5659 case LSHIFTRT:
5660 *total = COSTS_N_INSNS (1);
5661 return true;
5663 case DIV:
5664 case UDIV:
5665 case MOD:
5666 case UMOD:
5667 /* We make divide expensive, so that divide-by-constant will be
5668 optimized to a multiply. */
5669 *total = COSTS_N_INSNS (60);
5670 return true;
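/* For instance, with the cost of 60 above, a division by a small constant
   such as "x / 10" is normally expanded by the middle end into a multiply
   by a reciprocal ("magic") constant plus shifts, since that sequence is
   much cheaper than COSTS_N_INSNS (60).  The exact expansion chosen is up
   to the generic optimizers; the value here only biases the decision.  */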
5672 default:
5673 return false;
5677 /* Calculate the cost of moving data from a register in class FROM to
5678 one in class TO, using MODE. */
5680 static int
5681 ia64_register_move_cost (machine_mode mode, reg_class_t from,
5682 reg_class_t to)
5684 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5685 if (to == ADDL_REGS)
5686 to = GR_REGS;
5687 if (from == ADDL_REGS)
5688 from = GR_REGS;
5690 /* All costs are symmetric, so reduce cases by putting the
5691 lower number class as the destination. */
5692 if (from < to)
5694 reg_class_t tmp = to;
5695 to = from, from = tmp;
5698 /* Moving from FR<->GR in XFmode must be more expensive than 2,
5699 so that we get secondary memory reloads. Between FR_REGS,
5700 we have to make this at least as expensive as memory_move_cost
5701 to avoid spectacularly poor register class preferencing. */
5702 if (mode == XFmode || mode == RFmode)
5704 if (to != GR_REGS || from != GR_REGS)
5705 return memory_move_cost (mode, to, false);
5706 else
5707 return 3;
5710 switch (to)
5712 case PR_REGS:
5713 /* Moving between PR registers takes two insns. */
5714 if (from == PR_REGS)
5715 return 3;
5716 /* Moving between PR and anything but GR is impossible. */
5717 if (from != GR_REGS)
5718 return memory_move_cost (mode, to, false);
5719 break;
5721 case BR_REGS:
5722 /* Moving between BR and anything but GR is impossible. */
5723 if (from != GR_REGS && from != GR_AND_BR_REGS)
5724 return memory_move_cost (mode, to, false);
5725 break;
5727 case AR_I_REGS:
5728 case AR_M_REGS:
5729 /* Moving between AR and anything but GR is impossible. */
5730 if (from != GR_REGS)
5731 return memory_move_cost (mode, to, false);
5732 break;
5734 case GR_REGS:
5735 case FR_REGS:
5736 case FP_REGS:
5737 case GR_AND_FR_REGS:
5738 case GR_AND_BR_REGS:
5739 case ALL_REGS:
5740 break;
5742 default:
5743 gcc_unreachable ();
5746 return 2;
5749 /* Calculate the cost of moving data of MODE from a register to or from
5750 memory. */
5752 static int
5753 ia64_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
5754 reg_class_t rclass,
5755 bool in ATTRIBUTE_UNUSED)
5757 if (rclass == GENERAL_REGS
5758 || rclass == FR_REGS
5759 || rclass == FP_REGS
5760 || rclass == GR_AND_FR_REGS)
5761 return 4;
5762 else
5763 return 10;
5766 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5767 on RCLASS to use when copying X into that class. */
5769 static reg_class_t
5770 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5772 switch (rclass)
5774 case FR_REGS:
5775 case FP_REGS:
5776 /* Don't allow volatile mem reloads into floating point registers.
5777 This is defined to force reload to choose the r/m case instead
5778 of the f/f case when reloading (set (reg fX) (mem/v)). */
5779 if (MEM_P (x) && MEM_VOLATILE_P (x))
5780 return NO_REGS;
5782 /* Force all unrecognized constants into the constant pool. */
5783 if (CONSTANT_P (x))
5784 return NO_REGS;
5785 break;
5787 case AR_M_REGS:
5788 case AR_I_REGS:
5789 if (!OBJECT_P (x))
5790 return NO_REGS;
5791 break;
5793 default:
5794 break;
5797 return rclass;
5800 /* This function returns the register class required for a secondary
5801 register when copying between one of the registers in RCLASS, and X,
5802 using MODE. A return value of NO_REGS means that no secondary register
5803 is required. */
5805 enum reg_class
5806 ia64_secondary_reload_class (enum reg_class rclass,
5807 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5809 int regno = -1;
5811 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5812 regno = true_regnum (x);
5814 switch (rclass)
5816 case BR_REGS:
5817 case AR_M_REGS:
5818 case AR_I_REGS:
5819 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5820 interaction. We end up with two pseudos with overlapping lifetimes
5821 both of which are equiv to the same constant, and both of which need
5822 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5823 changes depending on the path length, which means the qty_first_reg
5824 check in make_regs_eqv can give different answers at different times.
5825 At some point I'll probably need a reload_indi pattern to handle
5826 this.
5828 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5829 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5830 non-general registers for good measure. */
5831 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5832 return GR_REGS;
5834 /* This is needed if a pseudo used as a call_operand gets spilled to a
5835 stack slot. */
5836 if (GET_CODE (x) == MEM)
5837 return GR_REGS;
5838 break;
5840 case FR_REGS:
5841 case FP_REGS:
5842 /* Need to go through general registers to get to other class regs. */
5843 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5844 return GR_REGS;
5846 /* This can happen when a paradoxical subreg is an operand to the
5847 muldi3 pattern. */
5848 /* ??? This shouldn't be necessary after instruction scheduling is
5849 enabled, because paradoxical subregs are not accepted by
5850 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5851 stop the paradoxical subreg stupidity in the *_operand functions
5852 in recog.c. */
5853 if (GET_CODE (x) == MEM
5854 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5855 || GET_MODE (x) == QImode))
5856 return GR_REGS;
5858 /* This can happen because of the ior/and/etc patterns that accept FP
5859 registers as operands. If the third operand is a constant, then it
5860 needs to be reloaded into a FP register. */
5861 if (GET_CODE (x) == CONST_INT)
5862 return GR_REGS;
5864 /* This can happen because of register elimination in a muldi3 insn.
5865 E.g. `26107 * (unsigned long)&u'. */
5866 if (GET_CODE (x) == PLUS)
5867 return GR_REGS;
5868 break;
5870 case PR_REGS:
5871 /* ??? This happens if we cse/gcse a BImode value across a call,
5872 and the function has a nonlocal goto. This is because global
5873 does not allocate call crossing pseudos to hard registers when
5874 crtl->has_nonlocal_goto is true. This is relatively
5875 common for C++ programs that use exceptions. To reproduce,
5876 return NO_REGS and compile libstdc++. */
5877 if (GET_CODE (x) == MEM)
5878 return GR_REGS;
5880 /* This can happen when we take a BImode subreg of a DImode value,
5881 and that DImode value winds up in some non-GR register. */
5882 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5883 return GR_REGS;
5884 break;
5886 default:
5887 break;
5890 return NO_REGS;
5894 /* Implement targetm.unspec_may_trap_p hook. */
5895 static int
5896 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5898 switch (XINT (x, 1))
5900 case UNSPEC_LDA:
5901 case UNSPEC_LDS:
5902 case UNSPEC_LDSA:
5903 case UNSPEC_LDCCLR:
5904 case UNSPEC_CHKACLR:
5905 case UNSPEC_CHKS:
5906 /* These unspecs are just wrappers. */
5907 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5910 return default_unspec_may_trap_p (x, flags);
5914 /* Parse the -mfixed-range= option string. */
5916 static void
5917 fix_range (const char *const_str)
5919 int i, first, last;
5920 char *str, *dash, *comma;
5922 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
5923 REG2 are either register names or register numbers. The effect
5924 of this option is to mark the registers in the range from REG1 to
5925 REG2 as ``fixed'' so they won't be used by the compiler. This is
5926 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
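/* As a hypothetical example, -mfixed-range=f32-f127,f12-f15 arrives here
   as the string "f32-f127,f12-f15"; each iteration of the loop below peels
   off one REG1-REG2 pair, decodes the names with decode_reg_name, and
   marks every register in the range in fixed_regs[] and call_used_regs[].  */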
5928 i = strlen (const_str);
5929 str = (char *) alloca (i + 1);
5930 memcpy (str, const_str, i + 1);
5932 while (1)
5934 dash = strchr (str, '-');
5935 if (!dash)
5937 warning (0, "value of -mfixed-range must have form REG1-REG2");
5938 return;
5940 *dash = '\0';
5942 comma = strchr (dash + 1, ',');
5943 if (comma)
5944 *comma = '\0';
5946 first = decode_reg_name (str);
5947 if (first < 0)
5949 warning (0, "unknown register name: %s", str);
5950 return;
5953 last = decode_reg_name (dash + 1);
5954 if (last < 0)
5956 warning (0, "unknown register name: %s", dash + 1);
5957 return;
5960 *dash = '-';
5962 if (first > last)
5964 warning (0, "%s-%s is an empty range", str, dash + 1);
5965 return;
5968 for (i = first; i <= last; ++i)
5969 fixed_regs[i] = call_used_regs[i] = 1;
5971 if (!comma)
5972 break;
5974 *comma = ',';
5975 str = comma + 1;
5979 /* Implement TARGET_OPTION_OVERRIDE. */
5981 static void
5982 ia64_option_override (void)
5984 unsigned int i;
5985 cl_deferred_option *opt;
5986 vec<cl_deferred_option> *v
5987 = (vec<cl_deferred_option> *) ia64_deferred_options;
5989 if (v)
5990 FOR_EACH_VEC_ELT (*v, i, opt)
5992 switch (opt->opt_index)
5994 case OPT_mfixed_range_:
5995 fix_range (opt->arg);
5996 break;
5998 default:
5999 gcc_unreachable ();
6003 if (TARGET_AUTO_PIC)
6004 target_flags |= MASK_CONST_GP;
6006 /* Numerous experiments show that IRA-based loop pressure
6007 calculation works better for RTL loop invariant motion on targets
6008 with enough (>= 32) registers. It is an expensive optimization.
6009 So it is on only for peak performance. */
6010 if (optimize >= 3)
6011 flag_ira_loop_pressure = 1;
6014 ia64_section_threshold = (global_options_set.x_g_switch_value
6015 ? g_switch_value
6016 : IA64_DEFAULT_GVALUE);
6018 init_machine_status = ia64_init_machine_status;
6020 if (align_functions <= 0)
6021 align_functions = 64;
6022 if (align_loops <= 0)
6023 align_loops = 32;
6024 if (TARGET_ABI_OPEN_VMS)
6025 flag_no_common = 1;
6027 ia64_override_options_after_change();
6030 /* Implement targetm.override_options_after_change. */
6032 static void
6033 ia64_override_options_after_change (void)
6035 if (optimize >= 3
6036 && !global_options_set.x_flag_selective_scheduling
6037 && !global_options_set.x_flag_selective_scheduling2)
6039 flag_selective_scheduling2 = 1;
6040 flag_sel_sched_pipelining = 1;
6042 if (mflag_sched_control_spec == 2)
6044 /* Control speculation is on by default for the selective scheduler,
6045 but not for the Haifa scheduler. */
6046 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
6048 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
6050 /* FIXME: remove this when we implement breaking of auto-inc insns as
6051 a transformation. */
6052 flag_auto_inc_dec = 0;
6056 /* Initialize the record of emitted frame related registers. */
6058 void ia64_init_expanders (void)
6060 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
6063 static struct machine_function *
6064 ia64_init_machine_status (void)
6066 return ggc_cleared_alloc<machine_function> ();
6069 static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
6070 static enum attr_type ia64_safe_type (rtx_insn *);
6072 static enum attr_itanium_class
6073 ia64_safe_itanium_class (rtx_insn *insn)
6075 if (recog_memoized (insn) >= 0)
6076 return get_attr_itanium_class (insn);
6077 else if (DEBUG_INSN_P (insn))
6078 return ITANIUM_CLASS_IGNORE;
6079 else
6080 return ITANIUM_CLASS_UNKNOWN;
6083 static enum attr_type
6084 ia64_safe_type (rtx_insn *insn)
6086 if (recog_memoized (insn) >= 0)
6087 return get_attr_type (insn);
6088 else
6089 return TYPE_UNKNOWN;
6092 /* The following collection of routines emit instruction group stop bits as
6093 necessary to avoid dependencies. */
6095 /* Need to track some additional registers as far as serialization is
6096 concerned so we can properly handle br.call and br.ret. We could
6097 make these registers visible to gcc, but since these registers are
6098 never explicitly used in gcc generated code, it seems wasteful to
6099 do so (plus it would make the call and return patterns needlessly
6100 complex). */
6101 #define REG_RP (BR_REG (0))
6102 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
6103 /* This is used for volatile asms which may require a stop bit immediately
6104 before and after them. */
6105 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
6106 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
6107 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
6109 /* For each register, we keep track of how it has been written in the
6110 current instruction group.
6112 If a register is written unconditionally (no qualifying predicate),
6113 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
6115 If a register is written if its qualifying predicate P is true, we
6116 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
6117 may be written again by the complement of P (P^1) and when this happens,
6118 WRITE_COUNT gets set to 2.
6120 The result of this is that whenever an insn attempts to write a register
6121 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
6123 If a predicate register is written by a floating-point insn, we set
6124 WRITTEN_BY_FP to true.
6126 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
6127 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
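/* As a worked example (the registers are only illustrative), suppose one
   instruction group contains

       mov r14 = r15        // unconditional write: WRITE_COUNT becomes 2
       mov r14 = r16        // WRITE_COUNT is already 2

   Then the second write of r14 is what rws_access_regno below reports as
   needing an insn group barrier before it.  */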
6129 #if GCC_VERSION >= 4000
6130 #define RWS_FIELD_TYPE __extension__ unsigned short
6131 #else
6132 #define RWS_FIELD_TYPE unsigned int
6133 #endif
6134 struct reg_write_state
6136 RWS_FIELD_TYPE write_count : 2;
6137 RWS_FIELD_TYPE first_pred : 10;
6138 RWS_FIELD_TYPE written_by_fp : 1;
6139 RWS_FIELD_TYPE written_by_and : 1;
6140 RWS_FIELD_TYPE written_by_or : 1;
6143 /* Cumulative info for the current instruction group. */
6144 struct reg_write_state rws_sum[NUM_REGS];
6145 #ifdef ENABLE_CHECKING
6146 /* Bitmap whether a register has been written in the current insn. */
6147 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
6148 / HOST_BITS_PER_WIDEST_FAST_INT];
6150 static inline void
6151 rws_insn_set (int regno)
6153 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
6154 SET_HARD_REG_BIT (rws_insn, regno);
6157 static inline int
6158 rws_insn_test (int regno)
6160 return TEST_HARD_REG_BIT (rws_insn, regno);
6162 #else
6163 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
6164 unsigned char rws_insn[2];
6166 static inline void
6167 rws_insn_set (int regno)
6169 if (regno == REG_AR_CFM)
6170 rws_insn[0] = 1;
6171 else if (regno == REG_VOLATILE)
6172 rws_insn[1] = 1;
6175 static inline int
6176 rws_insn_test (int regno)
6178 if (regno == REG_AR_CFM)
6179 return rws_insn[0];
6180 if (regno == REG_VOLATILE)
6181 return rws_insn[1];
6182 return 0;
6184 #endif
6186 /* Indicates whether this is the first instruction after a stop bit,
6187 in which case we don't need another stop bit. Without this,
6188 ia64_variable_issue will die when scheduling an alloc. */
6189 static int first_instruction;
6191 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
6192 RTL for one instruction. */
6193 struct reg_flags
6195 unsigned int is_write : 1; /* Is register being written? */
6196 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
6197 unsigned int is_branch : 1; /* Is register used as part of a branch? */
6198 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
6199 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
6200 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
6203 static void rws_update (int, struct reg_flags, int);
6204 static int rws_access_regno (int, struct reg_flags, int);
6205 static int rws_access_reg (rtx, struct reg_flags, int);
6206 static void update_set_flags (rtx, struct reg_flags *);
6207 static int set_src_needs_barrier (rtx, struct reg_flags, int);
6208 static int rtx_needs_barrier (rtx, struct reg_flags, int);
6209 static void init_insn_group_barriers (void);
6210 static int group_barrier_needed (rtx_insn *);
6211 static int safe_group_barrier_needed (rtx_insn *);
6212 static int in_safe_group_barrier;
6214 /* Update *RWS for REGNO, which is being written by the current instruction,
6215 with predicate PRED, and associated register flags in FLAGS. */
6217 static void
6218 rws_update (int regno, struct reg_flags flags, int pred)
6220 if (pred)
6221 rws_sum[regno].write_count++;
6222 else
6223 rws_sum[regno].write_count = 2;
6224 rws_sum[regno].written_by_fp |= flags.is_fp;
6225 /* ??? Not tracking and/or across differing predicates. */
6226 rws_sum[regno].written_by_and = flags.is_and;
6227 rws_sum[regno].written_by_or = flags.is_or;
6228 rws_sum[regno].first_pred = pred;
6231 /* Handle an access to register REGNO of type FLAGS using predicate register
6232 PRED. Update rws_sum array. Return 1 if this access creates
6233 a dependency with an earlier instruction in the same group. */
6235 static int
6236 rws_access_regno (int regno, struct reg_flags flags, int pred)
6238 int need_barrier = 0;
6240 gcc_assert (regno < NUM_REGS);
6242 if (! PR_REGNO_P (regno))
6243 flags.is_and = flags.is_or = 0;
6245 if (flags.is_write)
6247 int write_count;
6249 rws_insn_set (regno);
6250 write_count = rws_sum[regno].write_count;
6252 switch (write_count)
6254 case 0:
6255 /* The register has not been written yet. */
6256 if (!in_safe_group_barrier)
6257 rws_update (regno, flags, pred);
6258 break;
6260 case 1:
6261 /* The register has been written via a predicate. Treat
6262 it like an unconditional write and do not try to check
6263 for a complementary pred reg in an earlier write. */
6264 if (flags.is_and && rws_sum[regno].written_by_and)
6266 else if (flags.is_or && rws_sum[regno].written_by_or)
6268 else
6269 need_barrier = 1;
6270 if (!in_safe_group_barrier)
6271 rws_update (regno, flags, pred);
6272 break;
6274 case 2:
6275 /* The register has been unconditionally written already. We
6276 need a barrier. */
6277 if (flags.is_and && rws_sum[regno].written_by_and)
6279 else if (flags.is_or && rws_sum[regno].written_by_or)
6281 else
6282 need_barrier = 1;
6283 if (!in_safe_group_barrier)
6285 rws_sum[regno].written_by_and = flags.is_and;
6286 rws_sum[regno].written_by_or = flags.is_or;
6288 break;
6290 default:
6291 gcc_unreachable ();
6294 else
6296 if (flags.is_branch)
6298 /* Branches have several RAW exceptions that allow us to avoid
6299 barriers. */
6301 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6302 /* RAW dependencies on branch regs are permissible as long
6303 as the writer is a non-branch instruction. Since we
6304 never generate code that uses a branch register written
6305 by a branch instruction, handling this case is
6306 easy. */
6307 return 0;
6309 if (REGNO_REG_CLASS (regno) == PR_REGS
6310 && ! rws_sum[regno].written_by_fp)
6311 /* The predicates of a branch are available within the
6312 same insn group as long as the predicate was written by
6313 something other than a floating-point instruction. */
6314 return 0;
6317 if (flags.is_and && rws_sum[regno].written_by_and)
6318 return 0;
6319 if (flags.is_or && rws_sum[regno].written_by_or)
6320 return 0;
6322 switch (rws_sum[regno].write_count)
6324 case 0:
6325 /* The register has not been written yet. */
6326 break;
6328 case 1:
6329 /* The register has been written via a predicate; assume we
6330 need a barrier (don't check for complementary regs). */
6331 need_barrier = 1;
6332 break;
6334 case 2:
6335 /* The register has been unconditionally written already. We
6336 need a barrier. */
6337 need_barrier = 1;
6338 break;
6340 default:
6341 gcc_unreachable ();
6345 return need_barrier;
6348 static int
6349 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6351 int regno = REGNO (reg);
6352 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6354 if (n == 1)
6355 return rws_access_regno (regno, flags, pred);
6356 else
6358 int need_barrier = 0;
6359 while (--n >= 0)
6360 need_barrier |= rws_access_regno (regno + n, flags, pred);
6361 return need_barrier;
6365 /* Examine X, which is a SET rtx, and update the flags stored in
6366 *PFLAGS. */
6368 static void
6369 update_set_flags (rtx x, struct reg_flags *pflags)
6371 rtx src = SET_SRC (x);
6373 switch (GET_CODE (src))
6375 case CALL:
6376 return;
6378 case IF_THEN_ELSE:
6379 /* There are four cases here:
6380 (1) The destination is (pc), in which case this is a branch,
6381 nothing here applies.
6382 (2) The destination is ar.lc, in which case this is a
6383 doloop_end_internal,
6384 (3) The destination is an fp register, in which case this is
6385 an fselect instruction.
6386 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6387 this is a check load.
6388 In all cases, nothing we do in this function applies. */
6389 return;
6391 default:
6392 if (COMPARISON_P (src)
6393 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6394 /* Set pflags->is_fp to 1 so that we know we're dealing
6395 with a floating point comparison when processing the
6396 destination of the SET. */
6397 pflags->is_fp = 1;
6399 /* Discover if this is a parallel comparison. We only handle
6400 and.orcm and or.andcm at present, since we must retain a
6401 strict inverse on the predicate pair. */
6402 else if (GET_CODE (src) == AND)
6403 pflags->is_and = 1;
6404 else if (GET_CODE (src) == IOR)
6405 pflags->is_or = 1;
6407 break;
6411 /* Subroutine of rtx_needs_barrier; this function determines whether the
6412 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6413 are as in rtx_needs_barrier. COND is an rtx that holds the condition
6414 for this insn. */
6416 static int
6417 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6419 int need_barrier = 0;
6420 rtx dst;
6421 rtx src = SET_SRC (x);
6423 if (GET_CODE (src) == CALL)
6424 /* We don't need to worry about the result registers that
6425 get written by subroutine call. */
6426 return rtx_needs_barrier (src, flags, pred);
6427 else if (SET_DEST (x) == pc_rtx)
6429 /* X is a conditional branch. */
6430 /* ??? This seems redundant, as the caller sets this bit for
6431 all JUMP_INSNs. */
6432 if (!ia64_spec_check_src_p (src))
6433 flags.is_branch = 1;
6434 return rtx_needs_barrier (src, flags, pred);
6437 if (ia64_spec_check_src_p (src))
6438 /* Avoid checking one register twice (in condition
6439 and in 'then' section) for ldc pattern. */
6441 gcc_assert (REG_P (XEXP (src, 2)));
6442 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6444 /* We process MEM below. */
6445 src = XEXP (src, 1);
6448 need_barrier |= rtx_needs_barrier (src, flags, pred);
6450 dst = SET_DEST (x);
6451 if (GET_CODE (dst) == ZERO_EXTRACT)
6453 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6454 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6456 return need_barrier;
6459 /* Handle an access to rtx X of type FLAGS using predicate register
6460 PRED. Return 1 if this access creates a dependency with an earlier
6461 instruction in the same group. */
6463 static int
6464 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6466 int i, j;
6467 int is_complemented = 0;
6468 int need_barrier = 0;
6469 const char *format_ptr;
6470 struct reg_flags new_flags;
6471 rtx cond;
6473 if (! x)
6474 return 0;
6476 new_flags = flags;
6478 switch (GET_CODE (x))
6480 case SET:
6481 update_set_flags (x, &new_flags);
6482 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6483 if (GET_CODE (SET_SRC (x)) != CALL)
6485 new_flags.is_write = 1;
6486 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6488 break;
6490 case CALL:
6491 new_flags.is_write = 0;
6492 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6494 /* Avoid multiple register writes, in case this is a pattern with
6495 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6496 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6498 new_flags.is_write = 1;
6499 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6500 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6501 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6503 break;
6505 case COND_EXEC:
6506 /* X is a predicated instruction. */
6508 cond = COND_EXEC_TEST (x);
6509 gcc_assert (!pred);
6510 need_barrier = rtx_needs_barrier (cond, flags, 0);
6512 if (GET_CODE (cond) == EQ)
6513 is_complemented = 1;
6514 cond = XEXP (cond, 0);
6515 gcc_assert (GET_CODE (cond) == REG
6516 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6517 pred = REGNO (cond);
6518 if (is_complemented)
6519 ++pred;
6521 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6522 return need_barrier;
6524 case CLOBBER:
6525 case USE:
6526 /* Clobber & use are for earlier compiler-phases only. */
6527 break;
6529 case ASM_OPERANDS:
6530 case ASM_INPUT:
6531 /* We always emit stop bits for traditional asms. We emit stop bits
6532 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6533 if (GET_CODE (x) != ASM_OPERANDS
6534 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6536 /* Avoid writing the register multiple times if we have multiple
6537 asm outputs. This avoids a failure in rws_access_reg. */
6538 if (! rws_insn_test (REG_VOLATILE))
6540 new_flags.is_write = 1;
6541 rws_access_regno (REG_VOLATILE, new_flags, pred);
6543 return 1;
6546 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6547 We cannot just fall through here since then we would be confused
6548 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
6549 traditional asms unlike their normal usage. */
6551 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6552 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6553 need_barrier = 1;
6554 break;
6556 case PARALLEL:
6557 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6559 rtx pat = XVECEXP (x, 0, i);
6560 switch (GET_CODE (pat))
6562 case SET:
6563 update_set_flags (pat, &new_flags);
6564 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6565 break;
6567 case USE:
6568 case CALL:
6569 case ASM_OPERANDS:
6570 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6571 break;
6573 case CLOBBER:
6574 if (REG_P (XEXP (pat, 0))
6575 && extract_asm_operands (x) != NULL_RTX
6576 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6578 new_flags.is_write = 1;
6579 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6580 new_flags, pred);
6581 new_flags = flags;
6583 break;
6585 case RETURN:
6586 break;
6588 default:
6589 gcc_unreachable ();
6592 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6594 rtx pat = XVECEXP (x, 0, i);
6595 if (GET_CODE (pat) == SET)
6597 if (GET_CODE (SET_SRC (pat)) != CALL)
6599 new_flags.is_write = 1;
6600 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6601 pred);
6604 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6605 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6607 break;
6609 case SUBREG:
6610 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6611 break;
6612 case REG:
6613 if (REGNO (x) == AR_UNAT_REGNUM)
6615 for (i = 0; i < 64; ++i)
6616 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6618 else
6619 need_barrier = rws_access_reg (x, flags, pred);
6620 break;
6622 case MEM:
6623 /* Find the regs used in memory address computation. */
6624 new_flags.is_write = 0;
6625 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6626 break;
6628 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6629 case SYMBOL_REF: case LABEL_REF: case CONST:
6630 break;
6632 /* Operators with side-effects. */
6633 case POST_INC: case POST_DEC:
6634 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6636 new_flags.is_write = 0;
6637 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6638 new_flags.is_write = 1;
6639 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6640 break;
6642 case POST_MODIFY:
6643 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6645 new_flags.is_write = 0;
6646 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6647 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6648 new_flags.is_write = 1;
6649 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6650 break;
6652 /* Handle common unary and binary ops for efficiency. */
6653 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6654 case MOD: case UDIV: case UMOD: case AND: case IOR:
6655 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6656 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6657 case NE: case EQ: case GE: case GT: case LE:
6658 case LT: case GEU: case GTU: case LEU: case LTU:
6659 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6660 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6661 break;
6663 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6664 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6665 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6666 case SQRT: case FFS: case POPCOUNT:
6667 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6668 break;
6670 case VEC_SELECT:
6671 /* VEC_SELECT's second argument is a PARALLEL with integers that
6672 describe the elements selected. On ia64, those integers are
6673 always constants. Avoid walking the PARALLEL so that we don't
6674 get confused with "normal" parallels and then die. */
6675 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6676 break;
6678 case UNSPEC:
6679 switch (XINT (x, 1))
6681 case UNSPEC_LTOFF_DTPMOD:
6682 case UNSPEC_LTOFF_DTPREL:
6683 case UNSPEC_DTPREL:
6684 case UNSPEC_LTOFF_TPREL:
6685 case UNSPEC_TPREL:
6686 case UNSPEC_PRED_REL_MUTEX:
6687 case UNSPEC_PIC_CALL:
6688 case UNSPEC_MF:
6689 case UNSPEC_FETCHADD_ACQ:
6690 case UNSPEC_FETCHADD_REL:
6691 case UNSPEC_BSP_VALUE:
6692 case UNSPEC_FLUSHRS:
6693 case UNSPEC_BUNDLE_SELECTOR:
6694 break;
6696 case UNSPEC_GR_SPILL:
6697 case UNSPEC_GR_RESTORE:
6699 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6700 HOST_WIDE_INT bit = (offset >> 3) & 63;
6702 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6703 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6704 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6705 new_flags, pred);
6706 break;
6709 case UNSPEC_FR_SPILL:
6710 case UNSPEC_FR_RESTORE:
6711 case UNSPEC_GETF_EXP:
6712 case UNSPEC_SETF_EXP:
6713 case UNSPEC_ADDP4:
6714 case UNSPEC_FR_SQRT_RECIP_APPROX:
6715 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6716 case UNSPEC_LDA:
6717 case UNSPEC_LDS:
6718 case UNSPEC_LDS_A:
6719 case UNSPEC_LDSA:
6720 case UNSPEC_CHKACLR:
6721 case UNSPEC_CHKS:
6722 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6723 break;
6725 case UNSPEC_FR_RECIP_APPROX:
6726 case UNSPEC_SHRP:
6727 case UNSPEC_COPYSIGN:
6728 case UNSPEC_FR_RECIP_APPROX_RES:
6729 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6730 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6731 break;
6733 case UNSPEC_CMPXCHG_ACQ:
6734 case UNSPEC_CMPXCHG_REL:
6735 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6736 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6737 break;
6739 default:
6740 gcc_unreachable ();
6742 break;
6744 case UNSPEC_VOLATILE:
6745 switch (XINT (x, 1))
6747 case UNSPECV_ALLOC:
6748 /* Alloc must always be the first instruction of a group.
6749 We force this by always returning true. */
6750 /* ??? We might get better scheduling if we explicitly check for
6751 input/local/output register dependencies, and modify the
6752 scheduler so that alloc is always reordered to the start of
6753 the current group. We could then eliminate all of the
6754 first_instruction code. */
6755 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6757 new_flags.is_write = 1;
6758 rws_access_regno (REG_AR_CFM, new_flags, pred);
6759 return 1;
6761 case UNSPECV_SET_BSP:
6762 case UNSPECV_PROBE_STACK_RANGE:
6763 need_barrier = 1;
6764 break;
6766 case UNSPECV_BLOCKAGE:
6767 case UNSPECV_INSN_GROUP_BARRIER:
6768 case UNSPECV_BREAK:
6769 case UNSPECV_PSAC_ALL:
6770 case UNSPECV_PSAC_NORMAL:
6771 return 0;
6773 case UNSPECV_PROBE_STACK_ADDRESS:
6774 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6775 break;
6777 default:
6778 gcc_unreachable ();
6780 break;
6782 case RETURN:
6783 new_flags.is_write = 0;
6784 need_barrier = rws_access_regno (REG_RP, flags, pred);
6785 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6787 new_flags.is_write = 1;
6788 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6789 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6790 break;
6792 default:
6793 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6794 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6795 switch (format_ptr[i])
6797 case '0': /* unused field */
6798 case 'i': /* integer */
6799 case 'n': /* note */
6800 case 'w': /* wide integer */
6801 case 's': /* pointer to string */
6802 case 'S': /* optional pointer to string */
6803 break;
6805 case 'e':
6806 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6807 need_barrier = 1;
6808 break;
6810 case 'E':
6811 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6812 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6813 need_barrier = 1;
6814 break;
6816 default:
6817 gcc_unreachable ();
6819 break;
6821 return need_barrier;
6824 /* Clear out the state for group_barrier_needed at the start of a
6825 sequence of insns. */
6827 static void
6828 init_insn_group_barriers (void)
6830 memset (rws_sum, 0, sizeof (rws_sum));
6831 first_instruction = 1;
6834 /* Given the current state, determine whether a group barrier (a stop bit) is
6835 necessary before INSN. Return nonzero if so. This modifies the state to
6836 include the effects of INSN as a side-effect. */
6838 static int
6839 group_barrier_needed (rtx_insn *insn)
6841 rtx pat;
6842 int need_barrier = 0;
6843 struct reg_flags flags;
6845 memset (&flags, 0, sizeof (flags));
6846 switch (GET_CODE (insn))
6848 case NOTE:
6849 case DEBUG_INSN:
6850 break;
6852 case BARRIER:
6853 /* A barrier doesn't imply an instruction group boundary. */
6854 break;
6856 case CODE_LABEL:
6857 memset (rws_insn, 0, sizeof (rws_insn));
6858 return 1;
6860 case CALL_INSN:
6861 flags.is_branch = 1;
6862 flags.is_sibcall = SIBLING_CALL_P (insn);
6863 memset (rws_insn, 0, sizeof (rws_insn));
6865 /* Don't bundle a call following another call. */
6866 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6868 need_barrier = 1;
6869 break;
6872 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6873 break;
6875 case JUMP_INSN:
6876 if (!ia64_spec_check_p (insn))
6877 flags.is_branch = 1;
6879 /* Don't bundle a jump following a call. */
6880 if ((pat = prev_active_insn (insn)) && CALL_P (pat))
6882 need_barrier = 1;
6883 break;
6885 /* FALLTHRU */
6887 case INSN:
6888 if (GET_CODE (PATTERN (insn)) == USE
6889 || GET_CODE (PATTERN (insn)) == CLOBBER)
6890 /* Don't care about USE and CLOBBER "insns"---those are used to
6891 indicate to the optimizer that it shouldn't get rid of
6892 certain operations. */
6893 break;
6895 pat = PATTERN (insn);
6897 /* Ug. Hack hacks hacked elsewhere. */
6898 switch (recog_memoized (insn))
6900 /* We play dependency tricks with the epilogue in order
6901 to get proper schedules. Undo this for dv analysis. */
6902 case CODE_FOR_epilogue_deallocate_stack:
6903 case CODE_FOR_prologue_allocate_stack:
6904 pat = XVECEXP (pat, 0, 0);
6905 break;
6907 /* The pattern we use for br.cloop confuses the code above.
6908 The second element of the vector is representative. */
6909 case CODE_FOR_doloop_end_internal:
6910 pat = XVECEXP (pat, 0, 1);
6911 break;
6913 /* Doesn't generate code. */
6914 case CODE_FOR_pred_rel_mutex:
6915 case CODE_FOR_prologue_use:
6916 return 0;
6918 default:
6919 break;
6922 memset (rws_insn, 0, sizeof (rws_insn));
6923 need_barrier = rtx_needs_barrier (pat, flags, 0);
6925 /* Check to see if the previous instruction was a volatile
6926 asm. */
6927 if (! need_barrier)
6928 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6930 break;
6932 default:
6933 gcc_unreachable ();
6936 if (first_instruction && important_for_bundling_p (insn))
6938 need_barrier = 0;
6939 first_instruction = 0;
6942 return need_barrier;
6945 /* Like group_barrier_needed, but do not clobber the current state. */
6947 static int
6948 safe_group_barrier_needed (rtx_insn *insn)
6950 int saved_first_instruction;
6951 int t;
6953 saved_first_instruction = first_instruction;
6954 in_safe_group_barrier = 1;
6956 t = group_barrier_needed (insn);
6958 first_instruction = saved_first_instruction;
6959 in_safe_group_barrier = 0;
6961 return t;
6964 /* Scan the current function and insert stop bits as necessary to
6965 eliminate dependencies. This function assumes that a final
6966 instruction scheduling pass has been run which has already
6967 inserted most of the necessary stop bits. This function only
6968 inserts new ones at basic block boundaries, since these are
6969 invisible to the scheduler. */
6971 static void
6972 emit_insn_group_barriers (FILE *dump)
6974 rtx_insn *insn;
6975 rtx_insn *last_label = 0;
6976 int insns_since_last_label = 0;
6978 init_insn_group_barriers ();
6980 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6982 if (LABEL_P (insn))
6984 if (insns_since_last_label)
6985 last_label = insn;
6986 insns_since_last_label = 0;
6988 else if (NOTE_P (insn)
6989 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6991 if (insns_since_last_label)
6992 last_label = insn;
6993 insns_since_last_label = 0;
6995 else if (NONJUMP_INSN_P (insn)
6996 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6997 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6999 init_insn_group_barriers ();
7000 last_label = 0;
7002 else if (NONDEBUG_INSN_P (insn))
7004 insns_since_last_label = 1;
7006 if (group_barrier_needed (insn))
7008 if (last_label)
7010 if (dump)
7011 fprintf (dump, "Emitting stop before label %d\n",
7012 INSN_UID (last_label));
7013 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
7014 insn = last_label;
7016 init_insn_group_barriers ();
7017 last_label = 0;
7024 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
7025 This function has to emit all necessary group barriers. */
7027 static void
7028 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7030 rtx_insn *insn;
7032 init_insn_group_barriers ();
7034 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7036 if (BARRIER_P (insn))
7038 rtx_insn *last = prev_active_insn (insn);
7040 if (! last)
7041 continue;
7042 if (JUMP_TABLE_DATA_P (last))
7043 last = prev_active_insn (last);
7044 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7045 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7047 init_insn_group_barriers ();
7049 else if (NONDEBUG_INSN_P (insn))
7051 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7052 init_insn_group_barriers ();
7053 else if (group_barrier_needed (insn))
7055 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
7056 init_insn_group_barriers ();
7057 group_barrier_needed (insn);
7065 /* Instruction scheduling support. */
7067 #define NR_BUNDLES 10
7069 /* A list of names of all available bundles. */
7071 static const char *bundle_name [NR_BUNDLES] =
7073 ".mii",
7074 ".mmi",
7075 ".mfi",
7076 ".mmf",
7077 #if NR_BUNDLES == 10
7078 ".bbb",
7079 ".mbb",
7080 #endif
7081 ".mib",
7082 ".mmb",
7083 ".mfb",
7084 ".mlx"
7087 /* Nonzero if we should insert stop bits into the schedule. */
7089 int ia64_final_schedule = 0;
7091 /* Codes of the corresponding queried units: */
7093 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
7094 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
7096 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
7097 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
7099 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
7101 /* The following variable value is an insn group barrier. */
7103 static rtx_insn *dfa_stop_insn;
7105 /* The following variable value is the last issued insn. */
7107 static rtx_insn *last_scheduled_insn;
7109 /* The following variable value is a pointer to a DFA state used as
7110 a temporary variable. */
7112 static state_t temp_dfa_state = NULL;
7114 /* The following variable value is the DFA state after issuing the last
7115 insn. */
7117 static state_t prev_cycle_state = NULL;
7119 /* The following array element values are TRUE if the corresponding
7120 insn requires stop bits to be added before it. */
7122 static char *stops_p = NULL;
7124 /* The following variable is used to set up the array mentioned above. */
7126 static int stop_before_p = 0;
7128 /* The following variable value is the length of the arrays `clocks' and
7129 `add_cycles'. */
7131 static int clocks_length;
7134 /* The following variable value is the number of data speculations in progress. */
7134 static int pending_data_specs = 0;
7136 /* Number of memory references on current and three future processor cycles. */
7137 static char mem_ops_in_group[4];
7139 /* Number of current processor cycle (from scheduler's point of view). */
7140 static int current_cycle;
7142 static rtx ia64_single_set (rtx_insn *);
7143 static void ia64_emit_insn_before (rtx, rtx);
7145 /* Map a bundle number to its pseudo-op. */
7147 const char *
7148 get_bundle_name (int b)
7150 return bundle_name[b];
7154 /* Return the maximum number of instructions a cpu can issue. */
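/* (Six corresponds to two three-instruction bundles per clock on
   Itanium 2, which is what the value below assumes.)  */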
7156 static int
7157 ia64_issue_rate (void)
7159 return 6;
7162 /* Helper function - like single_set, but look inside COND_EXEC. */
7164 static rtx
7165 ia64_single_set (rtx_insn *insn)
7167 rtx x = PATTERN (insn), ret;
7168 if (GET_CODE (x) == COND_EXEC)
7169 x = COND_EXEC_CODE (x);
7170 if (GET_CODE (x) == SET)
7171 return x;
7173 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
7174 Although they are not classical single set, the second set is there just
7175 to protect it from moving past FP-relative stack accesses. */
7176 switch (recog_memoized (insn))
7178 case CODE_FOR_prologue_allocate_stack:
7179 case CODE_FOR_prologue_allocate_stack_pr:
7180 case CODE_FOR_epilogue_deallocate_stack:
7181 case CODE_FOR_epilogue_deallocate_stack_pr:
7182 ret = XVECEXP (x, 0, 0);
7183 break;
7185 default:
7186 ret = single_set_2 (insn, x);
7187 break;
7190 return ret;
7193 /* Adjust the cost of a scheduling dependency.
7194 Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
7195 COST is the current cost, DW is dependency weakness. */
7196 static int
7197 ia64_adjust_cost_2 (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
7198 int cost, dw_t dw)
7200 enum reg_note dep_type = (enum reg_note) dep_type1;
7201 enum attr_itanium_class dep_class;
7202 enum attr_itanium_class insn_class;
7204 insn_class = ia64_safe_itanium_class (insn);
7205 dep_class = ia64_safe_itanium_class (dep_insn);
7207 /* Treat true memory dependencies separately. Ignore apparent true
7208 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
7209 if (dep_type == REG_DEP_TRUE
7210 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
7211 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
7212 return 0;
7214 if (dw == MIN_DEP_WEAK)
7215 /* Store and load are likely to alias; use a higher cost to avoid a stall. */
7216 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
7217 else if (dw > MIN_DEP_WEAK)
7219 /* Store and load are less likely to alias. */
7220 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
7221 /* Assume there will be no cache conflict for floating-point data.
7222 For integer data, L1 conflict penalty is huge (17 cycles), so we
7223 never assume it will not cause a conflict. */
7224 return 0;
7225 else
7226 return cost;
7229 if (dep_type != REG_DEP_OUTPUT)
7230 return cost;
7232 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
7233 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
7234 return 0;
7236 return cost;
7239 /* Like emit_insn_before, but skip cycle_display notes.
7240 ??? When cycle display notes are implemented, update this. */
7242 static void
7243 ia64_emit_insn_before (rtx insn, rtx before)
7245 emit_insn_before (insn, before);
7248 /* The following function marks insns that produce addresses for load
7249 and store insns. Such insns will be placed into M slots because this
7250 decreases the latency for Itanium 1 (see function
7251 `ia64_produce_address_p' and the DFA descriptions). */
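/* For instance (purely illustrative), when an IALU insn such as
   "add r14 = r32, r33" feeds the address of a later "ld8 r15 = [r14]",
   the loop below sees the address bypass and sets insn->call, which the
   DFA description uses to prefer an M slot for the address producer.  */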
7253 static void
7254 ia64_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
7256 rtx_insn *insn, *next, *next_tail;
7258 /* Before reload, which_alternative is not set, which means that
7259 ia64_safe_itanium_class will produce wrong results for (at least)
7260 move instructions. */
7261 if (!reload_completed)
7262 return;
7264 next_tail = NEXT_INSN (tail);
7265 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7266 if (INSN_P (insn))
7267 insn->call = 0;
7268 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7269 if (INSN_P (insn)
7270 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7272 sd_iterator_def sd_it;
7273 dep_t dep;
7274 bool has_mem_op_consumer_p = false;
7276 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7278 enum attr_itanium_class c;
7280 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7281 continue;
7283 next = DEP_CON (dep);
7284 c = ia64_safe_itanium_class (next);
7285 if ((c == ITANIUM_CLASS_ST
7286 || c == ITANIUM_CLASS_STF)
7287 && ia64_st_address_bypass_p (insn, next))
7289 has_mem_op_consumer_p = true;
7290 break;
7292 else if ((c == ITANIUM_CLASS_LD
7293 || c == ITANIUM_CLASS_FLD
7294 || c == ITANIUM_CLASS_FLDP)
7295 && ia64_ld_address_bypass_p (insn, next))
7297 has_mem_op_consumer_p = true;
7298 break;
7302 insn->call = has_mem_op_consumer_p;
7306 /* We're beginning a new block. Initialize data structures as necessary. */
7308 static void
7309 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7310 int sched_verbose ATTRIBUTE_UNUSED,
7311 int max_ready ATTRIBUTE_UNUSED)
7313 #ifdef ENABLE_CHECKING
7314 rtx_insn *insn;
7316 if (!sel_sched_p () && reload_completed)
7317 for (insn = NEXT_INSN (current_sched_info->prev_head);
7318 insn != current_sched_info->next_tail;
7319 insn = NEXT_INSN (insn))
7320 gcc_assert (!SCHED_GROUP_P (insn));
7321 #endif
7322 last_scheduled_insn = NULL;
7323 init_insn_group_barriers ();
7325 current_cycle = 0;
7326 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7329 /* We're beginning a scheduling pass. Check assertion. */
7331 static void
7332 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7333 int sched_verbose ATTRIBUTE_UNUSED,
7334 int max_ready ATTRIBUTE_UNUSED)
7336 gcc_assert (pending_data_specs == 0);
7339 /* Scheduling pass is now finished. Free/reset static variable. */
7340 static void
7341 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7342 int sched_verbose ATTRIBUTE_UNUSED)
7344 gcc_assert (pending_data_specs == 0);
7347 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7348 speculation check), FALSE otherwise. */
7349 static bool
7350 is_load_p (rtx_insn *insn)
7352 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7354 return
7355 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7356 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7359 /* If INSN is a memory reference, memoize it in the MEM_OPS_IN_GROUP global array
7360 (taking into account the 3-cycle cache reference postponing for stores: Intel
7361 Itanium 2 Reference Manual for Software Development and Optimization,
7362 6.7.3.1). */
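/* E.g. a load issued on cycle 5 is counted in mem_ops_in_group[5 % 4],
   while a store issued on the same cycle is counted three cycles later,
   in mem_ops_in_group[(5 + 3) % 4]; cycle 5 is just an arbitrary example.  */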
7363 static void
7364 record_memory_reference (rtx_insn *insn)
7366 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7368 switch (insn_class) {
7369 case ITANIUM_CLASS_FLD:
7370 case ITANIUM_CLASS_LD:
7371 mem_ops_in_group[current_cycle % 4]++;
7372 break;
7373 case ITANIUM_CLASS_STF:
7374 case ITANIUM_CLASS_ST:
7375 mem_ops_in_group[(current_cycle + 3) % 4]++;
7376 break;
7377 default:;
7381 /* We are about to begin issuing insns for this clock cycle.
7382 Override the default sort algorithm to better slot instructions. */
7384 static int
7385 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7386 int *pn_ready, int clock_var,
7387 int reorder_type)
7389 int n_asms;
7390 int n_ready = *pn_ready;
7391 rtx_insn **e_ready = ready + n_ready;
7392 rtx_insn **insnp;
7394 if (sched_verbose)
7395 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7397 if (reorder_type == 0)
7399 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7400 n_asms = 0;
7401 for (insnp = ready; insnp < e_ready; insnp++)
7402 if (insnp < e_ready)
7404 rtx_insn *insn = *insnp;
7405 enum attr_type t = ia64_safe_type (insn);
7406 if (t == TYPE_UNKNOWN)
7408 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7409 || asm_noperands (PATTERN (insn)) >= 0)
7411 rtx_insn *lowest = ready[n_asms];
7412 ready[n_asms] = insn;
7413 *insnp = lowest;
7414 n_asms++;
7416 else
7418 rtx_insn *highest = ready[n_ready - 1];
7419 ready[n_ready - 1] = insn;
7420 *insnp = highest;
7421 return 1;
7426 if (n_asms < n_ready)
7428 /* Some normal insns to process. Skip the asms. */
7429 ready += n_asms;
7430 n_ready -= n_asms;
7432 else if (n_ready > 0)
7433 return 1;
7436 if (ia64_final_schedule)
7438 int deleted = 0;
7439 int nr_need_stop = 0;
7441 for (insnp = ready; insnp < e_ready; insnp++)
7442 if (safe_group_barrier_needed (*insnp))
7443 nr_need_stop++;
7445 if (reorder_type == 1 && n_ready == nr_need_stop)
7446 return 0;
7447 if (reorder_type == 0)
7448 return 1;
7449 insnp = e_ready;
7450 /* Move down everything that needs a stop bit, preserving
7451 relative order. */
7452 while (insnp-- > ready + deleted)
7453 while (insnp >= ready + deleted)
7455 rtx_insn *insn = *insnp;
7456 if (! safe_group_barrier_needed (insn))
7457 break;
7458 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7459 *ready = insn;
7460 deleted++;
7462 n_ready -= deleted;
7463 ready += deleted;
7466 current_cycle = clock_var;
7467 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7469 int moved = 0;
7471 insnp = e_ready;
7472 /* Move down loads/stores, preserving relative order. */
7473 while (insnp-- > ready + moved)
7474 while (insnp >= ready + moved)
7476 rtx_insn *insn = *insnp;
7477 if (! is_load_p (insn))
7478 break;
7479 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7480 *ready = insn;
7481 moved++;
7483 n_ready -= moved;
7484 ready += moved;
7487 return 1;
7490 /* We are about to begin issuing insns for this clock cycle. Override
7491 the default sort algorithm to better slot instructions. */
7493 static int
7494 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
7495 int *pn_ready, int clock_var)
7497 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7498 pn_ready, clock_var, 0);
7501 /* Like ia64_sched_reorder, but called after issuing each insn.
7502 Override the default sort algorithm to better slot instructions. */
7504 static int
7505 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7506 int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
7507 int *pn_ready, int clock_var)
7509 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7510 clock_var, 1);
7513 /* We are about to issue INSN. Return the number of insns left on the
7514 ready queue that can be issued this cycle. */
7516 static int
7517 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7518 int sched_verbose ATTRIBUTE_UNUSED,
7519 rtx_insn *insn,
7520 int can_issue_more ATTRIBUTE_UNUSED)
7522 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7523 /* Modulo scheduling does not extend h_i_d when emitting
7524 new instructions. Don't use h_i_d if we don't have to. */
7526 if (DONE_SPEC (insn) & BEGIN_DATA)
7527 pending_data_specs++;
7528 if (CHECK_SPEC (insn) & BEGIN_DATA)
7529 pending_data_specs--;
7532 if (DEBUG_INSN_P (insn))
7533 return 1;
7535 last_scheduled_insn = insn;
7536 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7537 if (reload_completed)
7539 int needed = group_barrier_needed (insn);
7541 gcc_assert (!needed);
7542 if (CALL_P (insn))
7543 init_insn_group_barriers ();
7544 stops_p [INSN_UID (insn)] = stop_before_p;
7545 stop_before_p = 0;
7547 record_memory_reference (insn);
7549 return 1;
7552 /* We are choosing insn from the ready queue. Return zero if INSN
7553 can be chosen. */
7555 static int
7556 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
7558 gcc_assert (insn && INSN_P (insn));
7560 /* The size of the ALAT is 32. Since we perform conservative
7561 data speculation, we keep the ALAT half empty. */
7562 if (pending_data_specs >= 16 && (TODO_SPEC (insn) & BEGIN_DATA))
7563 return ready_index == 0 ? -1 : 1;
7565 if (ready_index == 0)
7566 return 0;
7568 if ((!reload_completed
7569 || !safe_group_barrier_needed (insn))
7570 && (!mflag_sched_mem_insns_hard_limit
7571 || !is_load_p (insn)
7572 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns))
7573 return 0;
7575 return 1;
7578 /* The following variable value is a pseudo-insn used by the DFA insn
7579 scheduler to change the DFA state when the simulated clock is
7580 increased. */
7582 static rtx_insn *dfa_pre_cycle_insn;
7584 /* Returns 1 when a meaningful insn was scheduled between the last group
7585 barrier and LAST. */
7586 static int
7587 scheduled_good_insn (rtx_insn *last)
7589 if (last && recog_memoized (last) >= 0)
7590 return 1;
7592 for ( ;
7593 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7594 && !stops_p[INSN_UID (last)];
7595 last = PREV_INSN (last))
7596 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7597 the ebb we're scheduling. */
7598 if (INSN_P (last) && recog_memoized (last) >= 0)
7599 return 1;
7601 return 0;
7604 /* We are about to begin issuing INSN. Return nonzero if we cannot
7605 issue it on given cycle CLOCK and return zero if we should not sort
7606 the ready queue on the next clock start. */
7608 static int
7609 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
7610 int clock, int *sort_p)
7612 gcc_assert (insn && INSN_P (insn));
7614 if (DEBUG_INSN_P (insn))
7615 return 0;
7617 /* When a group barrier is needed for insn, last_scheduled_insn
7618 should be set. */
7619 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7620 || last_scheduled_insn);
7622 if ((reload_completed
7623 && (safe_group_barrier_needed (insn)
7624 || (mflag_sched_stop_bits_after_every_cycle
7625 && last_clock != clock
7626 && last_scheduled_insn
7627 && scheduled_good_insn (last_scheduled_insn))))
7628 || (last_scheduled_insn
7629 && (CALL_P (last_scheduled_insn)
7630 || unknown_for_bundling_p (last_scheduled_insn))))
7632 init_insn_group_barriers ();
7634 if (verbose && dump)
7635 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7636 last_clock == clock ? " + cycle advance" : "");
7638 stop_before_p = 1;
7639 current_cycle = clock;
7640 mem_ops_in_group[current_cycle % 4] = 0;
7642 if (last_clock == clock)
7644 state_transition (curr_state, dfa_stop_insn);
7645 if (TARGET_EARLY_STOP_BITS)
7646 *sort_p = (last_scheduled_insn == NULL_RTX
7647 || ! CALL_P (last_scheduled_insn));
7648 else
7649 *sort_p = 0;
7650 return 1;
7653 if (last_scheduled_insn)
7655 if (unknown_for_bundling_p (last_scheduled_insn))
7656 state_reset (curr_state);
7657 else
7659 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7660 state_transition (curr_state, dfa_stop_insn);
7661 state_transition (curr_state, dfa_pre_cycle_insn);
7662 state_transition (curr_state, NULL);
7666 return 0;
7669 /* Implement targetm.sched.h_i_d_extended hook.
7670 Extend internal data structures. */
7671 static void
7672 ia64_h_i_d_extended (void)
7674 if (stops_p != NULL)
7676 int new_clocks_length = get_max_uid () * 3 / 2;
7677 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7678 clocks_length = new_clocks_length;
7683 /* This structure describes the data used by the backend to guide scheduling.
7684 When the current scheduling point is switched, this data should be saved
7685 and restored later, if the scheduler returns to this point. */
7686 struct _ia64_sched_context
7688 state_t prev_cycle_state;
7689 rtx_insn *last_scheduled_insn;
7690 struct reg_write_state rws_sum[NUM_REGS];
7691 struct reg_write_state rws_insn[NUM_REGS];
7692 int first_instruction;
7693 int pending_data_specs;
7694 int current_cycle;
7695 char mem_ops_in_group[4];
7697 typedef struct _ia64_sched_context *ia64_sched_context_t;
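/* Note (descriptive, added for clarity): the fields above mirror, one
   for one, the file-scope scheduling state (prev_cycle_state,
   last_scheduled_insn, rws_sum, rws_insn, first_instruction,
   pending_data_specs, current_cycle and mem_ops_in_group), so that the
   selective scheduler can save the state at one scheduling point and
   restore it at another through the alloc/init/set/clear/free hooks
   defined below.  */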
7699 /* Allocates a scheduling context. */
7700 static void *
7701 ia64_alloc_sched_context (void)
7703 return xmalloc (sizeof (struct _ia64_sched_context));
7706 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7707 the global context otherwise. */
7708 static void
7709 ia64_init_sched_context (void *_sc, bool clean_p)
7711 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7713 sc->prev_cycle_state = xmalloc (dfa_state_size);
7714 if (clean_p)
7716 state_reset (sc->prev_cycle_state);
7717 sc->last_scheduled_insn = NULL;
7718 memset (sc->rws_sum, 0, sizeof (rws_sum));
7719 memset (sc->rws_insn, 0, sizeof (rws_insn));
7720 sc->first_instruction = 1;
7721 sc->pending_data_specs = 0;
7722 sc->current_cycle = 0;
7723 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7725 else
7727 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7728 sc->last_scheduled_insn = last_scheduled_insn;
7729 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7730 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7731 sc->first_instruction = first_instruction;
7732 sc->pending_data_specs = pending_data_specs;
7733 sc->current_cycle = current_cycle;
7734 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7738 /* Sets the global scheduling context to the one pointed to by _SC. */
7739 static void
7740 ia64_set_sched_context (void *_sc)
7742 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7744 gcc_assert (sc != NULL);
7746 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7747 last_scheduled_insn = sc->last_scheduled_insn;
7748 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7749 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7750 first_instruction = sc->first_instruction;
7751 pending_data_specs = sc->pending_data_specs;
7752 current_cycle = sc->current_cycle;
7753 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7756 /* Clears the data in the _SC scheduling context. */
7757 static void
7758 ia64_clear_sched_context (void *_sc)
7760 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7762 free (sc->prev_cycle_state);
7763 sc->prev_cycle_state = NULL;
7766 /* Frees the _SC scheduling context. */
7767 static void
7768 ia64_free_sched_context (void *_sc)
7770 gcc_assert (_sc != NULL);
7772 free (_sc);
7775 typedef rtx (* gen_func_t) (rtx, rtx);
7777 /* Return a function that will generate a load of mode MODE_NO
7778 with speculation types TS. */
7779 static gen_func_t
7780 get_spec_load_gen_function (ds_t ts, int mode_no)
7782 static gen_func_t gen_ld_[] = {
7783 gen_movbi,
7784 gen_movqi_internal,
7785 gen_movhi_internal,
7786 gen_movsi_internal,
7787 gen_movdi_internal,
7788 gen_movsf_internal,
7789 gen_movdf_internal,
7790 gen_movxf_internal,
7791 gen_movti_internal,
7792 gen_zero_extendqidi2,
7793 gen_zero_extendhidi2,
7794 gen_zero_extendsidi2,
7797 static gen_func_t gen_ld_a[] = {
7798 gen_movbi_advanced,
7799 gen_movqi_advanced,
7800 gen_movhi_advanced,
7801 gen_movsi_advanced,
7802 gen_movdi_advanced,
7803 gen_movsf_advanced,
7804 gen_movdf_advanced,
7805 gen_movxf_advanced,
7806 gen_movti_advanced,
7807 gen_zero_extendqidi2_advanced,
7808 gen_zero_extendhidi2_advanced,
7809 gen_zero_extendsidi2_advanced,
7811 static gen_func_t gen_ld_s[] = {
7812 gen_movbi_speculative,
7813 gen_movqi_speculative,
7814 gen_movhi_speculative,
7815 gen_movsi_speculative,
7816 gen_movdi_speculative,
7817 gen_movsf_speculative,
7818 gen_movdf_speculative,
7819 gen_movxf_speculative,
7820 gen_movti_speculative,
7821 gen_zero_extendqidi2_speculative,
7822 gen_zero_extendhidi2_speculative,
7823 gen_zero_extendsidi2_speculative,
7825 static gen_func_t gen_ld_sa[] = {
7826 gen_movbi_speculative_advanced,
7827 gen_movqi_speculative_advanced,
7828 gen_movhi_speculative_advanced,
7829 gen_movsi_speculative_advanced,
7830 gen_movdi_speculative_advanced,
7831 gen_movsf_speculative_advanced,
7832 gen_movdf_speculative_advanced,
7833 gen_movxf_speculative_advanced,
7834 gen_movti_speculative_advanced,
7835 gen_zero_extendqidi2_speculative_advanced,
7836 gen_zero_extendhidi2_speculative_advanced,
7837 gen_zero_extendsidi2_speculative_advanced,
7839 static gen_func_t gen_ld_s_a[] = {
7840 gen_movbi_speculative_a,
7841 gen_movqi_speculative_a,
7842 gen_movhi_speculative_a,
7843 gen_movsi_speculative_a,
7844 gen_movdi_speculative_a,
7845 gen_movsf_speculative_a,
7846 gen_movdf_speculative_a,
7847 gen_movxf_speculative_a,
7848 gen_movti_speculative_a,
7849 gen_zero_extendqidi2_speculative_a,
7850 gen_zero_extendhidi2_speculative_a,
7851 gen_zero_extendsidi2_speculative_a,
7854 gen_func_t *gen_ld;
7856 if (ts & BEGIN_DATA)
7858 if (ts & BEGIN_CONTROL)
7859 gen_ld = gen_ld_sa;
7860 else
7861 gen_ld = gen_ld_a;
7863 else if (ts & BEGIN_CONTROL)
7865 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7866 || ia64_needs_block_p (ts))
7867 gen_ld = gen_ld_s;
7868 else
7869 gen_ld = gen_ld_s_a;
7871 else if (ts == 0)
7872 gen_ld = gen_ld_;
7873 else
7874 gcc_unreachable ();
7876 return gen_ld[mode_no];
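/* For illustration (derived from the table selection above):
   BEGIN_DATA alone selects the advanced-load patterns (ld.a),
   BEGIN_CONTROL alone selects the control-speculative patterns (ld.s,
   or the gen_ld_s_a variant that can also be checked with ld.c),
   both bits together select the combined ld.sa form, and no
   speculation bits select the plain move patterns.  */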
7879 /* Constants that help map 'machine_mode' to int. */
7880 enum SPEC_MODES
7882 SPEC_MODE_INVALID = -1,
7883 SPEC_MODE_FIRST = 0,
7884 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7885 SPEC_MODE_FOR_EXTEND_LAST = 3,
7886 SPEC_MODE_LAST = 8
7889 enum
7891 /* Offset to reach ZERO_EXTEND patterns. */
7892 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7895 /* Return index of the MODE. */
7896 static int
7897 ia64_mode_to_int (machine_mode mode)
7899 switch (mode)
7901 case BImode: return 0; /* SPEC_MODE_FIRST */
7902 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7903 case HImode: return 2;
7904 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7905 case DImode: return 4;
7906 case SFmode: return 5;
7907 case DFmode: return 6;
7908 case XFmode: return 7;
7909 case TImode:
7910 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7911 mentioned in itanium[12].md. Predicate fp_register_operand also
7912 needs to be defined. Bottom line: better disable for now. */
7913 return SPEC_MODE_INVALID;
7914 default: return SPEC_MODE_INVALID;
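/* Worked example of the index scheme used by the gen_ld_* tables
   above: a DImode load maps to index 4, while a load zero-extended
   from QImode maps to 1 + SPEC_GEN_EXTEND_OFFSET = 9, i.e. the
   gen_zero_extendqidi2* entry.  Only QI/HI/SImode (indexes 1..3) have
   extending variants.  */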
7918 /* Provide information about speculation capabilities. */
7919 static void
7920 ia64_set_sched_flags (spec_info_t spec_info)
7922 unsigned int *flags = &(current_sched_info->flags);
7924 if (*flags & SCHED_RGN
7925 || *flags & SCHED_EBB
7926 || *flags & SEL_SCHED)
7928 int mask = 0;
7930 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7931 || (mflag_sched_ar_data_spec && reload_completed))
7933 mask |= BEGIN_DATA;
7935 if (!sel_sched_p ()
7936 && ((mflag_sched_br_in_data_spec && !reload_completed)
7937 || (mflag_sched_ar_in_data_spec && reload_completed)))
7938 mask |= BE_IN_DATA;
7941 if (mflag_sched_control_spec
7942 && (!sel_sched_p ()
7943 || reload_completed))
7945 mask |= BEGIN_CONTROL;
7947 if (!sel_sched_p () && mflag_sched_in_control_spec)
7948 mask |= BE_IN_CONTROL;
7951 spec_info->mask = mask;
7953 if (mask)
7955 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7957 if (mask & BE_IN_SPEC)
7958 *flags |= NEW_BBS;
7960 spec_info->flags = 0;
7962 if ((mask & CONTROL_SPEC)
7963 && sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7964 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7966 if (sched_verbose >= 1)
7967 spec_info->dump = sched_dump;
7968 else
7969 spec_info->dump = 0;
7971 if (mflag_sched_count_spec_in_critical_path)
7972 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7975 else
7976 spec_info->mask = 0;
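/* Summary of the mask computed above: BEGIN_DATA and BE_IN_DATA are
   enabled by the mflag_sched_br_*_data_spec flags before reload and by
   the mflag_sched_ar_*_data_spec flags after reload, while
   BEGIN_CONTROL and BE_IN_CONTROL are controlled by
   mflag_sched_control_spec and mflag_sched_in_control_spec.  These
   presumably correspond to the -msched-* command-line options with the
   matching names.  */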
7979 /* If INSN is an appropriate load, return the mode index used to
7980 select its speculative patterns. Return -1 otherwise. */
7981 static int
7982 get_mode_no_for_insn (rtx_insn *insn)
7984 rtx reg, mem, mode_rtx;
7985 int mode_no;
7986 bool extend_p;
7988 extract_insn_cached (insn);
7990 /* We use WHICH_ALTERNATIVE only after reload. This will
7991 guarantee that reload won't touch a speculative insn. */
7993 if (recog_data.n_operands != 2)
7994 return -1;
7996 reg = recog_data.operand[0];
7997 mem = recog_data.operand[1];
7999 /* We should use MEM's mode since REG's mode in presence of
8000 ZERO_EXTEND will always be DImode. */
8001 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
8002 /* Process non-speculative ld. */
8004 if (!reload_completed)
8006 /* Do not speculate into regs like ar.lc. */
8007 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
8008 return -1;
8010 if (!MEM_P (mem))
8011 return -1;
8014 rtx mem_reg = XEXP (mem, 0);
8016 if (!REG_P (mem_reg))
8017 return -1;
8020 mode_rtx = mem;
8022 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
8024 gcc_assert (REG_P (reg) && MEM_P (mem));
8025 mode_rtx = mem;
8027 else
8028 return -1;
8030 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
8031 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
8032 || get_attr_check_load (insn) == CHECK_LOAD_YES)
8033 /* Process speculative ld or ld.c. */
8035 gcc_assert (REG_P (reg) && MEM_P (mem));
8036 mode_rtx = mem;
8038 else
8040 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
8042 if (attr_class == ITANIUM_CLASS_CHK_A
8043 || attr_class == ITANIUM_CLASS_CHK_S_I
8044 || attr_class == ITANIUM_CLASS_CHK_S_F)
8045 /* Process chk. */
8046 mode_rtx = reg;
8047 else
8048 return -1;
8051 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
8053 if (mode_no == SPEC_MODE_INVALID)
8054 return -1;
8056 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
8058 if (extend_p)
8060 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
8061 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
8062 return -1;
8064 mode_no += SPEC_GEN_EXTEND_OFFSET;
8067 return mode_no;
8070 /* If X is an unspec part of a speculative load, return its code.
8071 Return -1 otherwise. */
8072 static int
8073 get_spec_unspec_code (const_rtx x)
8075 if (GET_CODE (x) != UNSPEC)
8076 return -1;
8079 int code;
8081 code = XINT (x, 1);
8083 switch (code)
8085 case UNSPEC_LDA:
8086 case UNSPEC_LDS:
8087 case UNSPEC_LDS_A:
8088 case UNSPEC_LDSA:
8089 return code;
8091 default:
8092 return -1;
8097 /* Implement skip_rtx_p hook. */
8098 static bool
8099 ia64_skip_rtx_p (const_rtx x)
8101 return get_spec_unspec_code (x) != -1;
8104 /* If INSN is a speculative load, return its UNSPEC code.
8105 Return -1 otherwise. */
8106 static int
8107 get_insn_spec_code (const_rtx insn)
8109 rtx pat, reg, mem;
8111 pat = PATTERN (insn);
8113 if (GET_CODE (pat) == COND_EXEC)
8114 pat = COND_EXEC_CODE (pat);
8116 if (GET_CODE (pat) != SET)
8117 return -1;
8119 reg = SET_DEST (pat);
8120 if (!REG_P (reg))
8121 return -1;
8123 mem = SET_SRC (pat);
8124 if (GET_CODE (mem) == ZERO_EXTEND)
8125 mem = XEXP (mem, 0);
8127 return get_spec_unspec_code (mem);
8130 /* If INSN is a speculative load, return a ds with the speculation types.
8131 Otherwise [if INSN is a normal instruction] return 0. */
8132 static ds_t
8133 ia64_get_insn_spec_ds (rtx_insn *insn)
8135 int code = get_insn_spec_code (insn);
8137 switch (code)
8139 case UNSPEC_LDA:
8140 return BEGIN_DATA;
8142 case UNSPEC_LDS:
8143 case UNSPEC_LDS_A:
8144 return BEGIN_CONTROL;
8146 case UNSPEC_LDSA:
8147 return BEGIN_DATA | BEGIN_CONTROL;
8149 default:
8150 return 0;
8154 /* If INSN is a speculative load return a ds with the speculation types that
8155 will be checked.
8156 Otherwise [if INSN is a normal instruction] return 0. */
8157 static ds_t
8158 ia64_get_insn_checked_ds (rtx_insn *insn)
8160 int code = get_insn_spec_code (insn);
8162 switch (code)
8164 case UNSPEC_LDA:
8165 return BEGIN_DATA | BEGIN_CONTROL;
8167 case UNSPEC_LDS:
8168 return BEGIN_CONTROL;
8170 case UNSPEC_LDS_A:
8171 case UNSPEC_LDSA:
8172 return BEGIN_DATA | BEGIN_CONTROL;
8174 default:
8175 return 0;
8179 /* Return a speculative load pattern for INSN with speculation type
8180 TS and mode index MODE_NO (a value from get_mode_no_for_insn,
8181 which already accounts for any ZERO_EXTEND). The operands of INSN
8182 are taken from recog_data. */
8183 static rtx
8184 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
8186 rtx pat, new_pat;
8187 gen_func_t gen_load;
8189 gen_load = get_spec_load_gen_function (ts, mode_no);
8191 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
8192 copy_rtx (recog_data.operand[1]));
8194 pat = PATTERN (insn);
8195 if (GET_CODE (pat) == COND_EXEC)
8196 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8197 new_pat);
8199 return new_pat;
8202 static bool
8203 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
8204 ds_t ds ATTRIBUTE_UNUSED)
8206 return false;
8209 /* Implement targetm.sched.speculate_insn hook.
8210 Check if the INSN can be TS speculative.
8211 If 'no' - return -1.
8212 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
8213 If current pattern of the INSN already provides TS speculation,
8214 return 0. */
8215 static int
8216 ia64_speculate_insn (rtx_insn *insn, ds_t ts, rtx *new_pat)
8218 int mode_no;
8219 int res;
8221 gcc_assert (!(ts & ~SPECULATIVE));
8223 if (ia64_spec_check_p (insn))
8224 return -1;
8226 if ((ts & BE_IN_SPEC)
8227 && !insn_can_be_in_speculative_p (insn, ts))
8228 return -1;
8230 mode_no = get_mode_no_for_insn (insn);
8232 if (mode_no != SPEC_MODE_INVALID)
8234 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
8235 res = 0;
8236 else
8238 res = 1;
8239 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
8242 else
8243 res = -1;
8245 return res;
8248 /* Return a function that will generate a check for speculation TS with mode
8249 MODE_NO.
8250 If simple check is needed, pass true for SIMPLE_CHECK_P.
8251 If clearing check is needed, pass true for CLEARING_CHECK_P. */
8252 static gen_func_t
8253 get_spec_check_gen_function (ds_t ts, int mode_no,
8254 bool simple_check_p, bool clearing_check_p)
8256 static gen_func_t gen_ld_c_clr[] = {
8257 gen_movbi_clr,
8258 gen_movqi_clr,
8259 gen_movhi_clr,
8260 gen_movsi_clr,
8261 gen_movdi_clr,
8262 gen_movsf_clr,
8263 gen_movdf_clr,
8264 gen_movxf_clr,
8265 gen_movti_clr,
8266 gen_zero_extendqidi2_clr,
8267 gen_zero_extendhidi2_clr,
8268 gen_zero_extendsidi2_clr,
8270 static gen_func_t gen_ld_c_nc[] = {
8271 gen_movbi_nc,
8272 gen_movqi_nc,
8273 gen_movhi_nc,
8274 gen_movsi_nc,
8275 gen_movdi_nc,
8276 gen_movsf_nc,
8277 gen_movdf_nc,
8278 gen_movxf_nc,
8279 gen_movti_nc,
8280 gen_zero_extendqidi2_nc,
8281 gen_zero_extendhidi2_nc,
8282 gen_zero_extendsidi2_nc,
8284 static gen_func_t gen_chk_a_clr[] = {
8285 gen_advanced_load_check_clr_bi,
8286 gen_advanced_load_check_clr_qi,
8287 gen_advanced_load_check_clr_hi,
8288 gen_advanced_load_check_clr_si,
8289 gen_advanced_load_check_clr_di,
8290 gen_advanced_load_check_clr_sf,
8291 gen_advanced_load_check_clr_df,
8292 gen_advanced_load_check_clr_xf,
8293 gen_advanced_load_check_clr_ti,
8294 gen_advanced_load_check_clr_di,
8295 gen_advanced_load_check_clr_di,
8296 gen_advanced_load_check_clr_di,
8298 static gen_func_t gen_chk_a_nc[] = {
8299 gen_advanced_load_check_nc_bi,
8300 gen_advanced_load_check_nc_qi,
8301 gen_advanced_load_check_nc_hi,
8302 gen_advanced_load_check_nc_si,
8303 gen_advanced_load_check_nc_di,
8304 gen_advanced_load_check_nc_sf,
8305 gen_advanced_load_check_nc_df,
8306 gen_advanced_load_check_nc_xf,
8307 gen_advanced_load_check_nc_ti,
8308 gen_advanced_load_check_nc_di,
8309 gen_advanced_load_check_nc_di,
8310 gen_advanced_load_check_nc_di,
8312 static gen_func_t gen_chk_s[] = {
8313 gen_speculation_check_bi,
8314 gen_speculation_check_qi,
8315 gen_speculation_check_hi,
8316 gen_speculation_check_si,
8317 gen_speculation_check_di,
8318 gen_speculation_check_sf,
8319 gen_speculation_check_df,
8320 gen_speculation_check_xf,
8321 gen_speculation_check_ti,
8322 gen_speculation_check_di,
8323 gen_speculation_check_di,
8324 gen_speculation_check_di,
8327 gen_func_t *gen_check;
8329 if (ts & BEGIN_DATA)
8331 /* We don't need recovery code, because even if this is ld.sa an
8332 ALAT entry will be allocated only if the NAT bit is zero.
8333 So it is enough to use ld.c here. */
8335 if (simple_check_p)
8337 gcc_assert (mflag_sched_spec_ldc);
8339 if (clearing_check_p)
8340 gen_check = gen_ld_c_clr;
8341 else
8342 gen_check = gen_ld_c_nc;
8344 else
8346 if (clearing_check_p)
8347 gen_check = gen_chk_a_clr;
8348 else
8349 gen_check = gen_chk_a_nc;
8352 else if (ts & BEGIN_CONTROL)
8354 if (simple_check_p)
8355 /* We might want to use ld.sa -> ld.c instead of
8356 ld.s -> chk.s. */
8358 gcc_assert (!ia64_needs_block_p (ts));
8360 if (clearing_check_p)
8361 gen_check = gen_ld_c_clr;
8362 else
8363 gen_check = gen_ld_c_nc;
8365 else
8367 gen_check = gen_chk_s;
8370 else
8371 gcc_unreachable ();
8373 gcc_assert (mode_no >= 0);
8374 return gen_check[mode_no];
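/* For reference, the tables above correspond to the IA-64 check
   flavors: gen_ld_c_clr / gen_ld_c_nc emit ld.c.clr / ld.c.nc
   (re-execute the load in place), gen_chk_a_clr / gen_chk_a_nc emit
   chk.a.clr / chk.a.nc (branch to recovery code if the ALAT entry was
   invalidated), and gen_chk_s emits chk.s for control speculation.  */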
8377 /* Return nonzero if a speculation of type TS needs a branchy recovery check. */
8378 static bool
8379 ia64_needs_block_p (ds_t ts)
8381 if (ts & BEGIN_DATA)
8382 return !mflag_sched_spec_ldc;
8384 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8386 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
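/* In other words: data speculation needs a branchy check (chk.a with a
   recovery block) unless ld.c checks are enabled, and control
   speculation needs one unless both mflag_sched_spec_ldc and
   mflag_sched_spec_control_ldc are enabled, in which case a simple
   in-place reload suffices.  */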
8389 /* Generate (or regenerate) a recovery check for INSN. */
8390 static rtx
8391 ia64_gen_spec_check (rtx_insn *insn, rtx_insn *label, ds_t ds)
8393 rtx op1, pat, check_pat;
8394 gen_func_t gen_check;
8395 int mode_no;
8397 mode_no = get_mode_no_for_insn (insn);
8398 gcc_assert (mode_no >= 0);
8400 if (label)
8401 op1 = label;
8402 else
8404 gcc_assert (!ia64_needs_block_p (ds));
8405 op1 = copy_rtx (recog_data.operand[1]);
8408 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8409 true);
8411 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8413 pat = PATTERN (insn);
8414 if (GET_CODE (pat) == COND_EXEC)
8415 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8416 check_pat);
8418 return check_pat;
8421 /* Return nonzero if X is a branchy recovery check. */
8422 static int
8423 ia64_spec_check_p (rtx x)
8425 x = PATTERN (x);
8426 if (GET_CODE (x) == COND_EXEC)
8427 x = COND_EXEC_CODE (x);
8428 if (GET_CODE (x) == SET)
8429 return ia64_spec_check_src_p (SET_SRC (x));
8430 return 0;
8433 /* Return nonzero if SRC belongs to a recovery check. */
8434 static int
8435 ia64_spec_check_src_p (rtx src)
8437 if (GET_CODE (src) == IF_THEN_ELSE)
8439 rtx t;
8441 t = XEXP (src, 0);
8442 if (GET_CODE (t) == NE)
8444 t = XEXP (t, 0);
8446 if (GET_CODE (t) == UNSPEC)
8448 int code;
8450 code = XINT (t, 1);
8452 if (code == UNSPEC_LDCCLR
8453 || code == UNSPEC_LDCNC
8454 || code == UNSPEC_CHKACLR
8455 || code == UNSPEC_CHKANC
8456 || code == UNSPEC_CHKS)
8458 gcc_assert (code != 0);
8459 return code;
8464 return 0;
8468 /* The following page contains abstract data `bundle states' which are
8469 used for bundling insns (inserting nops and template generation). */
8471 /* The following describes state of insn bundling. */
8473 struct bundle_state
8475 /* Unique bundle state number to identify them in the debugging
8476 output */
8477 int unique_num;
8478 rtx_insn *insn; /* corresponding insn, NULL for the 1st and the last state */
8479 /* Number of nops before and after the insn. */
8480 short before_nops_num, after_nops_num;
8481 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
8482 insn) */
8483 int cost; /* cost of the state in cycles */
8484 int accumulated_insns_num; /* number of all previous insns including
8485 nops. An L insn is counted as 2 insns */
8486 int branch_deviation; /* deviation of previous branches from 3rd slots */
8487 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8488 struct bundle_state *next; /* next state with the same insn_num */
8489 struct bundle_state *originator; /* originator (previous insn state) */
8490 /* All bundle states are in the following chain. */
8491 struct bundle_state *allocated_states_chain;
8492 /* The DFA State after issuing the insn and the nops. */
8493 state_t dfa_state;
8496 /* The following maps an insn number to the corresponding bundle state. */
8498 static struct bundle_state **index_to_bundle_states;
8500 /* The unique number of next bundle state. */
8502 static int bundle_states_num;
8504 /* All allocated bundle states are in the following chain. */
8506 static struct bundle_state *allocated_bundle_states_chain;
8508 /* All allocated but not used bundle states are in the following
8509 chain. */
8511 static struct bundle_state *free_bundle_state_chain;
8514 /* The following function returns a free bundle state. */
8516 static struct bundle_state *
8517 get_free_bundle_state (void)
8519 struct bundle_state *result;
8521 if (free_bundle_state_chain != NULL)
8523 result = free_bundle_state_chain;
8524 free_bundle_state_chain = result->next;
8526 else
8528 result = XNEW (struct bundle_state);
8529 result->dfa_state = xmalloc (dfa_state_size);
8530 result->allocated_states_chain = allocated_bundle_states_chain;
8531 allocated_bundle_states_chain = result;
8533 result->unique_num = bundle_states_num++;
8534 return result;
8538 /* The following function frees given bundle state. */
8540 static void
8541 free_bundle_state (struct bundle_state *state)
8543 state->next = free_bundle_state_chain;
8544 free_bundle_state_chain = state;
8547 /* Start work with abstract data `bundle states'. */
8549 static void
8550 initiate_bundle_states (void)
8552 bundle_states_num = 0;
8553 free_bundle_state_chain = NULL;
8554 allocated_bundle_states_chain = NULL;
8557 /* Finish work with abstract data `bundle states'. */
8559 static void
8560 finish_bundle_states (void)
8562 struct bundle_state *curr_state, *next_state;
8564 for (curr_state = allocated_bundle_states_chain;
8565 curr_state != NULL;
8566 curr_state = next_state)
8568 next_state = curr_state->allocated_states_chain;
8569 free (curr_state->dfa_state);
8570 free (curr_state);
8574 /* Hashtable helpers. */
8576 struct bundle_state_hasher : typed_noop_remove <bundle_state>
8578 typedef bundle_state value_type;
8579 typedef bundle_state compare_type;
8580 static inline hashval_t hash (const value_type *);
8581 static inline bool equal (const value_type *, const compare_type *);
8584 /* The function returns hash of BUNDLE_STATE. */
8586 inline hashval_t
8587 bundle_state_hasher::hash (const value_type *state)
8589 unsigned result, i;
8591 for (result = i = 0; i < dfa_state_size; i++)
8592 result += (((unsigned char *) state->dfa_state) [i]
8593 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8594 return result + state->insn_num;
8597 /* The function returns nonzero if the bundle state keys are equal. */
8599 inline bool
8600 bundle_state_hasher::equal (const value_type *state1,
8601 const compare_type *state2)
8603 return (state1->insn_num == state2->insn_num
8604 && memcmp (state1->dfa_state, state2->dfa_state,
8605 dfa_state_size) == 0);
8608 /* Hash table of the bundle states. The key is dfa_state and insn_num
8609 of the bundle states. */
8611 static hash_table<bundle_state_hasher> *bundle_state_table;
8613 /* Insert BUNDLE_STATE into the hash table and return nonzero if it
8614 was inserted. The table keeps only the best bundle state for each
8615 key (dfa_state, insn_num). */
8617 static int
8618 insert_bundle_state (struct bundle_state *bundle_state)
8620 struct bundle_state **entry_ptr;
8622 entry_ptr = bundle_state_table->find_slot (bundle_state, INSERT);
8623 if (*entry_ptr == NULL)
8625 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8626 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8627 *entry_ptr = bundle_state;
8628 return TRUE;
8630 else if (bundle_state->cost < (*entry_ptr)->cost
8631 || (bundle_state->cost == (*entry_ptr)->cost
8632 && ((*entry_ptr)->accumulated_insns_num
8633 > bundle_state->accumulated_insns_num
8634 || ((*entry_ptr)->accumulated_insns_num
8635 == bundle_state->accumulated_insns_num
8636 && ((*entry_ptr)->branch_deviation
8637 > bundle_state->branch_deviation
8638 || ((*entry_ptr)->branch_deviation
8639 == bundle_state->branch_deviation
8640 && (*entry_ptr)->middle_bundle_stops
8641 > bundle_state->middle_bundle_stops))))))
8644 struct bundle_state temp;
8646 temp = **entry_ptr;
8647 **entry_ptr = *bundle_state;
8648 (*entry_ptr)->next = temp.next;
8649 *bundle_state = temp;
8651 return FALSE;
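/* The comparison chain above implements the preference order used when
   two states share a key: lower cost first, then fewer accumulated
   insns (i.e. fewer inserted nops), then smaller branch deviation,
   then fewer mid-bundle stop bits.  The better state keeps the
   existing chain position by swapping contents rather than
   relinking.  */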
8654 /* Start work with the hash table. */
8656 static void
8657 initiate_bundle_state_table (void)
8659 bundle_state_table = new hash_table<bundle_state_hasher> (50);
8662 /* Finish work with the hash table. */
8664 static void
8665 finish_bundle_state_table (void)
8667 delete bundle_state_table;
8668 bundle_state_table = NULL;
8673 /* The following variable is an insn `nop' used to check bundle states
8674 with different numbers of inserted nops. */
8676 static rtx_insn *ia64_nop;
8678 /* The following function tries to issue NOPS_NUM nops for the current
8679 state without advancing processor cycle. If it failed, the
8680 function returns FALSE and frees the current state. */
8682 static int
8683 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8685 int i;
8687 for (i = 0; i < nops_num; i++)
8688 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8690 free_bundle_state (curr_state);
8691 return FALSE;
8693 return TRUE;
8696 /* The following function tries to issue INSN for the current
8697 state without advancing processor cycle. If it failed, the
8698 function returns FALSE and frees the current state. */
8700 static int
8701 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8703 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8705 free_bundle_state (curr_state);
8706 return FALSE;
8708 return TRUE;
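/* Note: as used in the two helpers above, state_transition () is
   expected to return a negative value when the insn (or nop) can be
   issued in the current DFA state without advancing the cycle; a
   non-negative result therefore means "does not fit" and the candidate
   bundle state is discarded.  */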
8711 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8712 starting with ORIGINATOR without advancing the processor cycle. If
8713 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8714 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole
8715 bundle. On success, the function creates a new bundle state and
8716 inserts it into the hash table and into `index_to_bundle_states'. */
8718 static void
8719 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8720 rtx_insn *insn, int try_bundle_end_p,
8721 int only_bundle_end_p)
8723 struct bundle_state *curr_state;
8725 curr_state = get_free_bundle_state ();
8726 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8727 curr_state->insn = insn;
8728 curr_state->insn_num = originator->insn_num + 1;
8729 curr_state->cost = originator->cost;
8730 curr_state->originator = originator;
8731 curr_state->before_nops_num = before_nops_num;
8732 curr_state->after_nops_num = 0;
8733 curr_state->accumulated_insns_num
8734 = originator->accumulated_insns_num + before_nops_num;
8735 curr_state->branch_deviation = originator->branch_deviation;
8736 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8737 gcc_assert (insn);
8738 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8740 gcc_assert (GET_MODE (insn) != TImode);
8741 if (!try_issue_nops (curr_state, before_nops_num))
8742 return;
8743 if (!try_issue_insn (curr_state, insn))
8744 return;
8745 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8746 if (curr_state->accumulated_insns_num % 3 != 0)
8747 curr_state->middle_bundle_stops++;
8748 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8749 && curr_state->accumulated_insns_num % 3 != 0)
8751 free_bundle_state (curr_state);
8752 return;
8755 else if (GET_MODE (insn) != TImode)
8757 if (!try_issue_nops (curr_state, before_nops_num))
8758 return;
8759 if (!try_issue_insn (curr_state, insn))
8760 return;
8761 curr_state->accumulated_insns_num++;
8762 gcc_assert (!unknown_for_bundling_p (insn));
8764 if (ia64_safe_type (insn) == TYPE_L)
8765 curr_state->accumulated_insns_num++;
8767 else
8769 /* If this is an insn that must be first in a group, then don't allow
8770 nops to be emitted before it. Currently, alloc is the only such
8771 supported instruction. */
8772 /* ??? The bundling automatons should handle this for us, but they do
8773 not yet have support for the first_insn attribute. */
8774 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8776 free_bundle_state (curr_state);
8777 return;
8780 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8781 state_transition (curr_state->dfa_state, NULL);
8782 curr_state->cost++;
8783 if (!try_issue_nops (curr_state, before_nops_num))
8784 return;
8785 if (!try_issue_insn (curr_state, insn))
8786 return;
8787 curr_state->accumulated_insns_num++;
8788 if (unknown_for_bundling_p (insn))
8790 /* Finish bundle containing asm insn. */
8791 curr_state->after_nops_num
8792 = 3 - curr_state->accumulated_insns_num % 3;
8793 curr_state->accumulated_insns_num
8794 += 3 - curr_state->accumulated_insns_num % 3;
8796 else if (ia64_safe_type (insn) == TYPE_L)
8797 curr_state->accumulated_insns_num++;
8799 if (ia64_safe_type (insn) == TYPE_B)
8800 curr_state->branch_deviation
8801 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8802 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8804 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8806 state_t dfa_state;
8807 struct bundle_state *curr_state1;
8808 struct bundle_state *allocated_states_chain;
8810 curr_state1 = get_free_bundle_state ();
8811 dfa_state = curr_state1->dfa_state;
8812 allocated_states_chain = curr_state1->allocated_states_chain;
8813 *curr_state1 = *curr_state;
8814 curr_state1->dfa_state = dfa_state;
8815 curr_state1->allocated_states_chain = allocated_states_chain;
8816 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8817 dfa_state_size);
8818 curr_state = curr_state1;
8820 if (!try_issue_nops (curr_state,
8821 3 - curr_state->accumulated_insns_num % 3))
8822 return;
8823 curr_state->after_nops_num
8824 = 3 - curr_state->accumulated_insns_num % 3;
8825 curr_state->accumulated_insns_num
8826 += 3 - curr_state->accumulated_insns_num % 3;
8828 if (!insert_bundle_state (curr_state))
8829 free_bundle_state (curr_state);
8830 return;
8833 /* The following function returns the position in the two-bundle
8834 window for the given STATE. */
8836 static int
8837 get_max_pos (state_t state)
8839 if (cpu_unit_reservation_p (state, pos_6))
8840 return 6;
8841 else if (cpu_unit_reservation_p (state, pos_5))
8842 return 5;
8843 else if (cpu_unit_reservation_p (state, pos_4))
8844 return 4;
8845 else if (cpu_unit_reservation_p (state, pos_3))
8846 return 3;
8847 else if (cpu_unit_reservation_p (state, pos_2))
8848 return 2;
8849 else if (cpu_unit_reservation_p (state, pos_1))
8850 return 1;
8851 else
8852 return 0;
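/* pos_1 .. pos_6 are CPU unit codes (looked up in ia64_reorg) that,
   judging by their names, record how many of the six slots in the
   two-bundle issue window are occupied; the return value is the number
   of filled slots, so 6 means the window is full and 3 means exactly
   one bundle is filled.  */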
8855 /* The function returns the code of a possible template for the given
8856 position and state. It should be called only with a position value
8857 of 3 or 6. We avoid generating F NOPs by putting templates
8858 containing F insns at the end of the template search, because of an
8859 undocumented anomaly in McKinley-derived cores which can cause
8860 stalls if an F-unit insn (including a NOP) is issued within a
8861 six-cycle window after reading certain application registers (such
8862 as ar.bsp). Furthermore, power considerations also argue against
8863 the use of F-unit instructions unless they're really needed. */
8865 static int
8866 get_template (state_t state, int pos)
8868 switch (pos)
8870 case 3:
8871 if (cpu_unit_reservation_p (state, _0mmi_))
8872 return 1;
8873 else if (cpu_unit_reservation_p (state, _0mii_))
8874 return 0;
8875 else if (cpu_unit_reservation_p (state, _0mmb_))
8876 return 7;
8877 else if (cpu_unit_reservation_p (state, _0mib_))
8878 return 6;
8879 else if (cpu_unit_reservation_p (state, _0mbb_))
8880 return 5;
8881 else if (cpu_unit_reservation_p (state, _0bbb_))
8882 return 4;
8883 else if (cpu_unit_reservation_p (state, _0mmf_))
8884 return 3;
8885 else if (cpu_unit_reservation_p (state, _0mfi_))
8886 return 2;
8887 else if (cpu_unit_reservation_p (state, _0mfb_))
8888 return 8;
8889 else if (cpu_unit_reservation_p (state, _0mlx_))
8890 return 9;
8891 else
8892 gcc_unreachable ();
8893 case 6:
8894 if (cpu_unit_reservation_p (state, _1mmi_))
8895 return 1;
8896 else if (cpu_unit_reservation_p (state, _1mii_))
8897 return 0;
8898 else if (cpu_unit_reservation_p (state, _1mmb_))
8899 return 7;
8900 else if (cpu_unit_reservation_p (state, _1mib_))
8901 return 6;
8902 else if (cpu_unit_reservation_p (state, _1mbb_))
8903 return 5;
8904 else if (cpu_unit_reservation_p (state, _1bbb_))
8905 return 4;
8906 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8907 return 3;
8908 else if (cpu_unit_reservation_p (state, _1mfi_))
8909 return 2;
8910 else if (cpu_unit_reservation_p (state, _1mfb_))
8911 return 8;
8912 else if (cpu_unit_reservation_p (state, _1mlx_))
8913 return 9;
8914 else
8915 gcc_unreachable ();
8916 default:
8917 gcc_unreachable ();
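/* For reference, judging by the CPU unit names, the small integers
   returned above are the bundle template numbers passed to
   gen_bundle_selector: 0 = .mii, 1 = .mmi, 2 = .mfi, 3 = .mmf,
   4 = .bbb, 5 = .mbb, 6 = .mib, 7 = .mmb, 8 = .mfb, 9 = .mlx
   (cf. the NR_BUNDLES == 10 check in ia64_add_bundle_selector_before
   below).  */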
8921 /* True when INSN is important for bundling. */
8923 static bool
8924 important_for_bundling_p (rtx_insn *insn)
8926 return (INSN_P (insn)
8927 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8928 && GET_CODE (PATTERN (insn)) != USE
8929 && GET_CODE (PATTERN (insn)) != CLOBBER);
8932 /* The following function returns the first insn important for insn
8933 bundling at or after INSN and before TAIL. */
8935 static rtx_insn *
8936 get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
8938 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8939 if (important_for_bundling_p (insn))
8940 return insn;
8941 return NULL;
8944 /* True when INSN is unknown, but important, for bundling. */
8946 static bool
8947 unknown_for_bundling_p (rtx_insn *insn)
8949 return (INSN_P (insn)
8950 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
8951 && GET_CODE (PATTERN (insn)) != USE
8952 && GET_CODE (PATTERN (insn)) != CLOBBER);
8955 /* Add a bundle selector TEMPLATE0 before INSN. */
8957 static void
8958 ia64_add_bundle_selector_before (int template0, rtx_insn *insn)
8960 rtx b = gen_bundle_selector (GEN_INT (template0));
8962 ia64_emit_insn_before (b, insn);
8963 #if NR_BUNDLES == 10
8964 if ((template0 == 4 || template0 == 5)
8965 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8967 int i;
8968 rtx note = NULL_RTX;
8970 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
8971 first or second slot. If it is and has a REG_EH_REGION note, copy
8972 the note to the following nops, as br.call sets rp to the address
8973 of the following bundle and therefore an EH region end must be on
8974 a bundle boundary. */
8975 insn = PREV_INSN (insn);
8976 for (i = 0; i < 3; i++)
8979 insn = next_active_insn (insn);
8980 while (NONJUMP_INSN_P (insn)
8981 && get_attr_empty (insn) == EMPTY_YES);
8982 if (CALL_P (insn))
8983 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8984 else if (note)
8986 int code;
8988 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8989 || code == CODE_FOR_nop_b);
8990 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8991 note = NULL_RTX;
8992 else
8993 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8997 #endif
9000 /* The following function does insn bundling. Bundling means
9001 inserting templates and nop insns to fit insn groups into permitted
9002 templates. Instruction scheduling uses an NDFA (non-deterministic
9003 finite automaton) encoding information about the templates and the
9004 inserted nops. Nondeterminism of the automaton permits following
9005 all possible insn sequences very fast.
9007 Unfortunately it is not possible to get information about inserted
9008 nop insns and used templates from the automaton states. The
9009 automaton only says that we can issue an insn, possibly inserting
9010 some nops before it and using some template. Therefore insn
9011 bundling in this function is implemented by using a DFA
9012 (deterministic finite automaton). We follow all possible insn
9013 sequences by inserting 0-2 nops (that is what the NDFA describes
9014 for insn scheduling) before/after each insn being bundled. We know
9015 the start of a simulated processor cycle from insn scheduling (an
9016 insn starting a new cycle has TImode).
9018 A simple implementation of insn bundling would create an enormous
9019 number of possible insn sequences satisfying the information about
9020 new cycle ticks taken from insn scheduling. To make the algorithm
9021 practical we use dynamic programming. Each decision (about
9022 inserting nops and implicitly about previous decisions) is described
9023 by the structure bundle_state (see above). If we generate the same
9024 bundle state (the key is the automaton state after issuing the insns
9025 and nops for it), we reuse the already generated one. As a
9026 consequence we reject some decisions which cannot improve the
9027 solution and reduce the memory used by the algorithm.
9029 When we reach the end of the EBB (extended basic block), we choose
9030 the best sequence and then, moving back through the EBB, insert
9031 templates for the best alternative. The templates are taken by
9032 querying the automaton state for each insn in the chosen bundle states.
9034 So the algorithm makes two (forward and backward) passes through
9035 the EBB. */
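/* A rough example of the effect (illustrative only): if the scheduler
   emits an M-type insn and a B-type insn in one cycle, the backward
   pass may select an .mib template and fill the unused I slot with a
   nop, producing assembly along the lines of

       { .mib
         ld8   r14 = [r32]
         nop.i 0
         br.cond.dptk .L1 ;;
       }

   The register numbers and label here are purely illustrative.  */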
9037 static void
9038 bundling (FILE *dump, int verbose, rtx_insn *prev_head_insn, rtx_insn *tail)
9040 struct bundle_state *curr_state, *next_state, *best_state;
9041 rtx_insn *insn, *next_insn;
9042 int insn_num;
9043 int i, bundle_end_p, only_bundle_end_p, asm_p;
9044 int pos = 0, max_pos, template0, template1;
9045 rtx_insn *b;
9046 enum attr_type type;
9048 insn_num = 0;
9049 /* Count insns in the EBB. */
9050 for (insn = NEXT_INSN (prev_head_insn);
9051 insn && insn != tail;
9052 insn = NEXT_INSN (insn))
9053 if (INSN_P (insn))
9054 insn_num++;
9055 if (insn_num == 0)
9056 return;
9057 bundling_p = 1;
9058 dfa_clean_insn_cache ();
9059 initiate_bundle_state_table ();
9060 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
9061 /* First (forward) pass -- generation of bundle states. */
9062 curr_state = get_free_bundle_state ();
9063 curr_state->insn = NULL;
9064 curr_state->before_nops_num = 0;
9065 curr_state->after_nops_num = 0;
9066 curr_state->insn_num = 0;
9067 curr_state->cost = 0;
9068 curr_state->accumulated_insns_num = 0;
9069 curr_state->branch_deviation = 0;
9070 curr_state->middle_bundle_stops = 0;
9071 curr_state->next = NULL;
9072 curr_state->originator = NULL;
9073 state_reset (curr_state->dfa_state);
9074 index_to_bundle_states [0] = curr_state;
9075 insn_num = 0;
9076 /* Shift cycle mark if it is put on insn which could be ignored. */
9077 for (insn = NEXT_INSN (prev_head_insn);
9078 insn != tail;
9079 insn = NEXT_INSN (insn))
9080 if (INSN_P (insn)
9081 && !important_for_bundling_p (insn)
9082 && GET_MODE (insn) == TImode)
9084 PUT_MODE (insn, VOIDmode);
9085 for (next_insn = NEXT_INSN (insn);
9086 next_insn != tail;
9087 next_insn = NEXT_INSN (next_insn))
9088 if (important_for_bundling_p (next_insn)
9089 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
9091 PUT_MODE (next_insn, TImode);
9092 break;
9095 /* Forward pass: generation of bundle states. */
9096 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
9097 insn != NULL_RTX;
9098 insn = next_insn)
9100 gcc_assert (important_for_bundling_p (insn));
9101 type = ia64_safe_type (insn);
9102 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
9103 insn_num++;
9104 index_to_bundle_states [insn_num] = NULL;
9105 for (curr_state = index_to_bundle_states [insn_num - 1];
9106 curr_state != NULL;
9107 curr_state = next_state)
9109 pos = curr_state->accumulated_insns_num % 3;
9110 next_state = curr_state->next;
9111 /* We must fill up the current bundle in order to start a
9112 subsequent asm insn in a new bundle. Asm insn is always
9113 placed in a separate bundle. */
9114 only_bundle_end_p
9115 = (next_insn != NULL_RTX
9116 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
9117 && unknown_for_bundling_p (next_insn));
9118 /* We may fill up the current bundle if it is the cycle end
9119 without a group barrier. */
9120 bundle_end_p
9121 = (only_bundle_end_p || next_insn == NULL_RTX
9122 || (GET_MODE (next_insn) == TImode
9123 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
9124 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
9125 || type == TYPE_S)
9126 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
9127 only_bundle_end_p);
9128 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
9129 only_bundle_end_p);
9130 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
9131 only_bundle_end_p);
9133 gcc_assert (index_to_bundle_states [insn_num]);
9134 for (curr_state = index_to_bundle_states [insn_num];
9135 curr_state != NULL;
9136 curr_state = curr_state->next)
9137 if (verbose >= 2 && dump)
9139 /* This structure is taken from generated code of the
9140 pipeline hazard recognizer (see file insn-attrtab.c).
9141 Please don't forget to change the structure if a new
9142 automaton is added to .md file. */
9143 struct DFA_chip
9145 unsigned short one_automaton_state;
9146 unsigned short oneb_automaton_state;
9147 unsigned short two_automaton_state;
9148 unsigned short twob_automaton_state;
9151 fprintf
9152 (dump,
9153 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
9154 curr_state->unique_num,
9155 (curr_state->originator == NULL
9156 ? -1 : curr_state->originator->unique_num),
9157 curr_state->cost,
9158 curr_state->before_nops_num, curr_state->after_nops_num,
9159 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9160 curr_state->middle_bundle_stops,
9161 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9162 INSN_UID (insn));
9166 /* We should find a solution because the 2nd insn scheduling has
9167 found one. */
9168 gcc_assert (index_to_bundle_states [insn_num]);
9169 /* Find a state corresponding to the best insn sequence. */
9170 best_state = NULL;
9171 for (curr_state = index_to_bundle_states [insn_num];
9172 curr_state != NULL;
9173 curr_state = curr_state->next)
9174 /* We only look at states whose last bundle is completely filled.
9175 First we prefer insn sequences with minimal cost, then those with
9176 the fewest inserted nops, and finally those with branch insns
9177 placed in 3rd slots. */
9178 if (curr_state->accumulated_insns_num % 3 == 0
9179 && (best_state == NULL || best_state->cost > curr_state->cost
9180 || (best_state->cost == curr_state->cost
9181 && (curr_state->accumulated_insns_num
9182 < best_state->accumulated_insns_num
9183 || (curr_state->accumulated_insns_num
9184 == best_state->accumulated_insns_num
9185 && (curr_state->branch_deviation
9186 < best_state->branch_deviation
9187 || (curr_state->branch_deviation
9188 == best_state->branch_deviation
9189 && curr_state->middle_bundle_stops
9190 < best_state->middle_bundle_stops)))))))
9191 best_state = curr_state;
9192 /* Second (backward) pass: adding nops and templates. */
9193 gcc_assert (best_state);
9194 insn_num = best_state->before_nops_num;
9195 template0 = template1 = -1;
9196 for (curr_state = best_state;
9197 curr_state->originator != NULL;
9198 curr_state = curr_state->originator)
9200 insn = curr_state->insn;
9201 asm_p = unknown_for_bundling_p (insn);
9202 insn_num++;
9203 if (verbose >= 2 && dump)
9205 struct DFA_chip
9207 unsigned short one_automaton_state;
9208 unsigned short oneb_automaton_state;
9209 unsigned short two_automaton_state;
9210 unsigned short twob_automaton_state;
9213 fprintf
9214 (dump,
9215 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
9216 curr_state->unique_num,
9217 (curr_state->originator == NULL
9218 ? -1 : curr_state->originator->unique_num),
9219 curr_state->cost,
9220 curr_state->before_nops_num, curr_state->after_nops_num,
9221 curr_state->accumulated_insns_num, curr_state->branch_deviation,
9222 curr_state->middle_bundle_stops,
9223 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
9224 INSN_UID (insn));
9226 /* Find the position in the current bundle window. The window can
9227 contain at most two bundles. A two-bundle window means that
9228 the processor will make two bundle rotations. */
9229 max_pos = get_max_pos (curr_state->dfa_state);
9230 if (max_pos == 6
9231 /* The following (negative template number) means that the
9232 processor did one bundle rotation. */
9233 || (max_pos == 3 && template0 < 0))
9235 /* We are at the end of the window -- find template(s) for
9236 its bundle(s). */
9237 pos = max_pos;
9238 if (max_pos == 3)
9239 template0 = get_template (curr_state->dfa_state, 3);
9240 else
9242 template1 = get_template (curr_state->dfa_state, 3);
9243 template0 = get_template (curr_state->dfa_state, 6);
9246 if (max_pos > 3 && template1 < 0)
9247 /* It may happen when we have the stop inside a bundle. */
9249 gcc_assert (pos <= 3);
9250 template1 = get_template (curr_state->dfa_state, 3);
9251 pos += 3;
9253 if (!asm_p)
9254 /* Emit nops after the current insn. */
9255 for (i = 0; i < curr_state->after_nops_num; i++)
9257 rtx nop_pat = gen_nop ();
9258 rtx_insn *nop = emit_insn_after (nop_pat, insn);
9259 pos--;
9260 gcc_assert (pos >= 0);
9261 if (pos % 3 == 0)
9263 /* We are at the start of a bundle: emit the template
9264 (it should be defined). */
9265 gcc_assert (template0 >= 0);
9266 ia64_add_bundle_selector_before (template0, nop);
9267 /* If we have two bundle window, we make one bundle
9268 rotation. Otherwise template0 will be undefined
9269 (negative value). */
9270 template0 = template1;
9271 template1 = -1;
9274 /* Move the position backward in the window. A group barrier has
9275 no slot. An asm insn takes a whole bundle. */
9276 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9277 && !unknown_for_bundling_p (insn))
9278 pos--;
9279 /* Long insn takes 2 slots. */
9280 if (ia64_safe_type (insn) == TYPE_L)
9281 pos--;
9282 gcc_assert (pos >= 0);
9283 if (pos % 3 == 0
9284 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9285 && !unknown_for_bundling_p (insn))
9287 /* The current insn is at the bundle start: emit the
9288 template. */
9289 gcc_assert (template0 >= 0);
9290 ia64_add_bundle_selector_before (template0, insn);
9291 b = PREV_INSN (insn);
9292 insn = b;
9293 /* See comment above in analogous place for emitting nops
9294 after the insn. */
9295 template0 = template1;
9296 template1 = -1;
9298 /* Emit nops before the current insn. */
9299 for (i = 0; i < curr_state->before_nops_num; i++)
9301 rtx nop_pat = gen_nop ();
9302 ia64_emit_insn_before (nop_pat, insn);
9303 rtx_insn *nop = PREV_INSN (insn);
9304 insn = nop;
9305 pos--;
9306 gcc_assert (pos >= 0);
9307 if (pos % 3 == 0)
9309 /* See comment above in analogous place for emitting nops
9310 after the insn. */
9311 gcc_assert (template0 >= 0);
9312 ia64_add_bundle_selector_before (template0, insn);
9313 b = PREV_INSN (insn);
9314 insn = b;
9315 template0 = template1;
9316 template1 = -1;
9321 #ifdef ENABLE_CHECKING
9323 /* Assert right calculation of middle_bundle_stops. */
9324 int num = best_state->middle_bundle_stops;
9325 bool start_bundle = true, end_bundle = false;
9327 for (insn = NEXT_INSN (prev_head_insn);
9328 insn && insn != tail;
9329 insn = NEXT_INSN (insn))
9331 if (!INSN_P (insn))
9332 continue;
9333 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9334 start_bundle = true;
9335 else
9337 rtx_insn *next_insn;
9339 for (next_insn = NEXT_INSN (insn);
9340 next_insn && next_insn != tail;
9341 next_insn = NEXT_INSN (next_insn))
9342 if (INSN_P (next_insn)
9343 && (ia64_safe_itanium_class (next_insn)
9344 != ITANIUM_CLASS_IGNORE
9345 || recog_memoized (next_insn)
9346 == CODE_FOR_bundle_selector)
9347 && GET_CODE (PATTERN (next_insn)) != USE
9348 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9349 break;
9351 end_bundle = next_insn == NULL_RTX
9352 || next_insn == tail
9353 || (INSN_P (next_insn)
9354 && recog_memoized (next_insn)
9355 == CODE_FOR_bundle_selector);
9356 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9357 && !start_bundle && !end_bundle
9358 && next_insn
9359 && !unknown_for_bundling_p (next_insn))
9360 num--;
9362 start_bundle = false;
9366 gcc_assert (num == 0);
9368 #endif
9370 free (index_to_bundle_states);
9371 finish_bundle_state_table ();
9372 bundling_p = 0;
9373 dfa_clean_insn_cache ();
9376 /* The following function is called at the end of scheduling BB or
9377 EBB. After reload, it inserts stop bits and does insn bundling. */
9379 static void
9380 ia64_sched_finish (FILE *dump, int sched_verbose)
9382 if (sched_verbose)
9383 fprintf (dump, "// Finishing schedule.\n");
9384 if (!reload_completed)
9385 return;
9386 if (reload_completed)
9388 final_emit_insn_group_barriers (dump);
9389 bundling (dump, sched_verbose, current_sched_info->prev_head,
9390 current_sched_info->next_tail);
9391 if (sched_verbose && dump)
9392 fprintf (dump, "// finishing %d-%d\n",
9393 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9394 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9396 return;
9400 /* The following function inserts stop bits in scheduled BB or EBB. */
9402 static void
9403 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9405 rtx_insn *insn;
9406 int need_barrier_p = 0;
9407 int seen_good_insn = 0;
9409 init_insn_group_barriers ();
9411 for (insn = NEXT_INSN (current_sched_info->prev_head);
9412 insn != current_sched_info->next_tail;
9413 insn = NEXT_INSN (insn))
9415 if (BARRIER_P (insn))
9417 rtx_insn *last = prev_active_insn (insn);
9419 if (! last)
9420 continue;
9421 if (JUMP_TABLE_DATA_P (last))
9422 last = prev_active_insn (last);
9423 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9424 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9426 init_insn_group_barriers ();
9427 seen_good_insn = 0;
9428 need_barrier_p = 0;
9430 else if (NONDEBUG_INSN_P (insn))
9432 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9434 init_insn_group_barriers ();
9435 seen_good_insn = 0;
9436 need_barrier_p = 0;
9438 else if (need_barrier_p || group_barrier_needed (insn)
9439 || (mflag_sched_stop_bits_after_every_cycle
9440 && GET_MODE (insn) == TImode
9441 && seen_good_insn))
9443 if (TARGET_EARLY_STOP_BITS)
9445 rtx_insn *last;
9447 for (last = insn;
9448 last != current_sched_info->prev_head;
9449 last = PREV_INSN (last))
9450 if (INSN_P (last) && GET_MODE (last) == TImode
9451 && stops_p [INSN_UID (last)])
9452 break;
9453 if (last == current_sched_info->prev_head)
9454 last = insn;
9455 last = prev_active_insn (last);
9456 if (last
9457 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9458 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9459 last);
9460 init_insn_group_barriers ();
9461 for (last = NEXT_INSN (last);
9462 last != insn;
9463 last = NEXT_INSN (last))
9464 if (INSN_P (last))
9466 group_barrier_needed (last);
9467 if (recog_memoized (last) >= 0
9468 && important_for_bundling_p (last))
9469 seen_good_insn = 1;
9472 else
9474 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9475 insn);
9476 init_insn_group_barriers ();
9477 seen_good_insn = 0;
9479 group_barrier_needed (insn);
9480 if (recog_memoized (insn) >= 0
9481 && important_for_bundling_p (insn))
9482 seen_good_insn = 1;
9484 else if (recog_memoized (insn) >= 0
9485 && important_for_bundling_p (insn))
9486 seen_good_insn = 1;
9487 need_barrier_p = (CALL_P (insn) || unknown_for_bundling_p (insn));
9494 /* The following function returns the number of ready insns examined
9495 by the first-cycle multipass DFA lookahead in the insn scheduler. */
9497 static int
9498 ia64_first_cycle_multipass_dfa_lookahead (void)
9500 return (reload_completed ? 6 : 4);
9503 /* The following function initiates variable `dfa_pre_cycle_insn'. */
9505 static void
9506 ia64_init_dfa_pre_cycle_insn (void)
9508 if (temp_dfa_state == NULL)
9510 dfa_state_size = state_size ();
9511 temp_dfa_state = xmalloc (dfa_state_size);
9512 prev_cycle_state = xmalloc (dfa_state_size);
9514 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9515 SET_PREV_INSN (dfa_pre_cycle_insn) = SET_NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9516 recog_memoized (dfa_pre_cycle_insn);
9517 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9518 SET_PREV_INSN (dfa_stop_insn) = SET_NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9519 recog_memoized (dfa_stop_insn);
9522 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9523 used by the DFA insn scheduler. */
9525 static rtx
9526 ia64_dfa_pre_cycle_insn (void)
9528 return dfa_pre_cycle_insn;
9531 /* The following function returns TRUE if PRODUCER (of type ilog or
9532 ld) produces address for CONSUMER (of type st or stf). */
9535 ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9537 rtx dest, reg, mem;
9539 gcc_assert (producer && consumer);
9540 dest = ia64_single_set (producer);
9541 gcc_assert (dest);
9542 reg = SET_DEST (dest);
9543 gcc_assert (reg);
9544 if (GET_CODE (reg) == SUBREG)
9545 reg = SUBREG_REG (reg);
9546 gcc_assert (GET_CODE (reg) == REG);
9548 dest = ia64_single_set (consumer);
9549 gcc_assert (dest);
9550 mem = SET_DEST (dest);
9551 gcc_assert (mem && GET_CODE (mem) == MEM);
9552 return reg_mentioned_p (reg, mem);
9555 /* The following function returns TRUE if PRODUCER (of type ilog or
9556 ld) produces address for CONSUMER (of type ld or fld). */
9559 ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
9561 rtx dest, src, reg, mem;
9563 gcc_assert (producer && consumer);
9564 dest = ia64_single_set (producer);
9565 gcc_assert (dest);
9566 reg = SET_DEST (dest);
9567 gcc_assert (reg);
9568 if (GET_CODE (reg) == SUBREG)
9569 reg = SUBREG_REG (reg);
9570 gcc_assert (GET_CODE (reg) == REG);
9572 src = ia64_single_set (consumer);
9573 gcc_assert (src);
9574 mem = SET_SRC (src);
9575 gcc_assert (mem);
9577 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9578 mem = XVECEXP (mem, 0, 0);
9579 else if (GET_CODE (mem) == IF_THEN_ELSE)
9580 /* ??? Is this bypass necessary for ld.c? */
9582 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9583 mem = XEXP (mem, 1);
9586 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9587 mem = XEXP (mem, 0);
9589 if (GET_CODE (mem) == UNSPEC)
9591 int c = XINT (mem, 1);
9593 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9594 || c == UNSPEC_LDSA);
9595 mem = XVECEXP (mem, 0, 0);
9598 /* Note that LO_SUM is used for GOT loads. */
9599 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9601 return reg_mentioned_p (reg, mem);
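/* These *_address_bypass_p predicates are intended to be referenced
   from define_bypass guards in the Itanium pipeline descriptions, so
   that the latency is adjusted when an integer or load result feeds
   the address of a subsequent memory access, e.g.

       add   r14 = r32, r33 ;;
       st8   [r14] = r35        // address comes from the preceding add

   (illustrative assembly; the exact latencies live in the .md files).  */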
9604 /* The following function returns TRUE if INSN produces an address
9605 for a load/store insn. We place such insns into an M slot because
9606 that decreases their latency. */
9609 ia64_produce_address_p (rtx insn)
9611 return insn->call;
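/* Note: the `call' bit of the insn is reused here as a scratch flag;
   it is presumably set by the dependence-analysis hooks earlier in
   this file for insns whose result was found to feed a memory address,
   so that the scheduler can prefer an M slot for them (see the comment
   above).  */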
9615 /* Emit pseudo-ops for the assembler to describe predicate relations.
9616 At present this assumes that we only consider predicate pairs to
9617 be mutex, and that the assembler can deduce proper values from
9618 straight-line code. */
9620 static void
9621 emit_predicate_relation_info (void)
9623 basic_block bb;
9625 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9627 int r;
9628 rtx_insn *head = BB_HEAD (bb);
9630 /* We only need such notes at code labels. */
9631 if (! LABEL_P (head))
9632 continue;
9633 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9634 head = NEXT_INSN (head);
9636 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9637 grabbing the entire block of predicate registers. */
9638 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9639 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9641 rtx p = gen_rtx_REG (BImode, r);
9642 rtx_insn *n = emit_insn_after (gen_pred_rel_mutex (p), head);
9643 if (head == BB_END (bb))
9644 BB_END (bb) = n;
9645 head = n;
9649 /* Look for conditional calls that do not return, and protect predicate
9650 relations around them. Otherwise the assembler will assume the call
9651 returns, and complain about uses of call-clobbered predicates after
9652 the call. */
9653 FOR_EACH_BB_REVERSE_FN (bb, cfun)
9655 rtx_insn *insn = BB_HEAD (bb);
9657 while (1)
9659 if (CALL_P (insn)
9660 && GET_CODE (PATTERN (insn)) == COND_EXEC
9661 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9663 rtx_insn *b =
9664 emit_insn_before (gen_safe_across_calls_all (), insn);
9665 rtx_insn *a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9666 if (BB_HEAD (bb) == insn)
9667 BB_HEAD (bb) = b;
9668 if (BB_END (bb) == insn)
9669 BB_END (bb) = a;
9672 if (insn == BB_END (bb))
9673 break;
9674 insn = NEXT_INSN (insn);
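/* For illustration, the directives emitted above come out roughly as

     .pred.rel.mutex p6, p7

   after a code label, and, around a non-returning conditional call,

     .pred.safe_across_calls p1-p63
     (p6) br.call.spnt.few b0 = abort#
     .pred.safe_across_calls p1-p5,p16-p63

   The exact operand lists come from the pred_rel_mutex and
   safe_across_calls_* patterns in ia64.md; the ones shown here are
   only indicative.  */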
9679 /* Perform machine dependent operations on the rtl chain INSNS. */
9681 static void
9682 ia64_reorg (void)
9684 /* We are freeing block_for_insn in the toplev to keep compatibility
9685 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9686 compute_bb_for_insn ();
9688 /* If optimizing, we'll have split before scheduling. */
9689 if (optimize == 0)
9690 split_all_insns ();
9692 if (optimize && flag_schedule_insns_after_reload
9693 && dbg_cnt (ia64_sched2))
9695 basic_block bb;
9696 timevar_push (TV_SCHED2);
9697 ia64_final_schedule = 1;
9699 /* We can't let modulo-sched prevent us from scheduling any bbs,
9700 since we need the final schedule to produce bundle information. */
9701 FOR_EACH_BB_FN (bb, cfun)
9702 bb->flags &= ~BB_DISABLE_SCHEDULE;
9704 initiate_bundle_states ();
9705 ia64_nop = make_insn_raw (gen_nop ());
9706 SET_PREV_INSN (ia64_nop) = SET_NEXT_INSN (ia64_nop) = NULL_RTX;
9707 recog_memoized (ia64_nop);
9708 clocks_length = get_max_uid () + 1;
9709 stops_p = XCNEWVEC (char, clocks_length);
9711 if (ia64_tune == PROCESSOR_ITANIUM2)
9713 pos_1 = get_cpu_unit_code ("2_1");
9714 pos_2 = get_cpu_unit_code ("2_2");
9715 pos_3 = get_cpu_unit_code ("2_3");
9716 pos_4 = get_cpu_unit_code ("2_4");
9717 pos_5 = get_cpu_unit_code ("2_5");
9718 pos_6 = get_cpu_unit_code ("2_6");
9719 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9720 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9721 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9722 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9723 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9724 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9725 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9726 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9727 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9728 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9729 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9730 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9731 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9732 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9733 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9734 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9735 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9736 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9737 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9738 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9740 else
9742 pos_1 = get_cpu_unit_code ("1_1");
9743 pos_2 = get_cpu_unit_code ("1_2");
9744 pos_3 = get_cpu_unit_code ("1_3");
9745 pos_4 = get_cpu_unit_code ("1_4");
9746 pos_5 = get_cpu_unit_code ("1_5");
9747 pos_6 = get_cpu_unit_code ("1_6");
9748 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9749 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9750 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9751 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9752 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9753 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9754 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9755 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9756 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9757 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9758 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9759 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9760 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9761 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9762 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9763 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9764 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9765 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9766 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9767 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9770 if (flag_selective_scheduling2
9771 && !maybe_skip_selective_scheduling ())
9772 run_selective_scheduling ();
9773 else
9774 schedule_ebbs ();
9777 /* Redo alignment computation, as it might have gone wrong. */
9777 compute_alignments ();
9779 /* We cannot reuse this one because it has been corrupted by the
9780 evil glat. */
9781 finish_bundle_states ();
9782 free (stops_p);
9783 stops_p = NULL;
9784 emit_insn_group_barriers (dump_file);
9786 ia64_final_schedule = 0;
9787 timevar_pop (TV_SCHED2);
9789 else
9790 emit_all_insn_group_barriers (dump_file);
9792 df_analyze ();
9794 /* A call must not be the last instruction in a function, so that the
9795 return address stays within the function and unwinding works
9796 properly. Note that IA-64 differs from dwarf2 on this point. */
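/* Concretely (illustrative only): if the last active insn were

     br.call.sptk.many b0 = foo#

   the code below appends a stop bit and a dummy break insn,

     ;;
     break.f 0
     ;;

   so that the return address no longer points past the end of the
   function.  The exact spelling comes from the break_f pattern in
   ia64.md.  */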
9797 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9799 rtx_insn *insn;
9800 int saw_stop = 0;
9802 insn = get_last_insn ();
9803 if (! INSN_P (insn))
9804 insn = prev_active_insn (insn);
9805 if (insn)
9807 /* Skip over insns that expand to nothing. */
9808 while (NONJUMP_INSN_P (insn)
9809 && get_attr_empty (insn) == EMPTY_YES)
9811 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9812 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9813 saw_stop = 1;
9814 insn = prev_active_insn (insn);
9816 if (CALL_P (insn))
9818 if (! saw_stop)
9819 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9820 emit_insn (gen_break_f ());
9821 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9826 emit_predicate_relation_info ();
9828 if (flag_var_tracking)
9830 timevar_push (TV_VAR_TRACKING);
9831 variable_tracking_main ();
9832 timevar_pop (TV_VAR_TRACKING);
9834 df_finish_pass (false);
9837 /* Return true if REGNO is used by the epilogue. */
9840 ia64_epilogue_uses (int regno)
9842 switch (regno)
9844 case R_GR (1):
9845 /* With a call to a function in another module, we will write a new
9846 value to "gp". After returning from such a call, we need to make
9847 sure the function restores the original gp-value, even if the
9848 function itself does not use the gp anymore. */
9849 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9851 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9852 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9853 /* For functions defined with the syscall_linkage attribute, all
9854 input registers are marked as live at all function exits. This
9855 prevents the register allocator from using the input registers,
9856 which in turn makes it possible to restart a system call after
9857 an interrupt without having to save/restore the input registers.
9858 This also prevents kernel data from leaking to application code. */
9859 return lookup_attribute ("syscall_linkage",
9860 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
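/* For illustration, such a function might be declared as

     extern long my_restartable_call (long, long)
       __attribute__ ((syscall_linkage));

   (the name is made up); the attribute only changes which registers
   are considered live at function exit, as described above.  */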
9862 case R_BR (0):
9863 /* Conditional return patterns can't represent the use of `b0' as
9864 the return address, so we force the value live this way. */
9865 return 1;
9867 case AR_PFS_REGNUM:
9868 /* Likewise for ar.pfs, which is used by br.ret. */
9869 return 1;
9871 default:
9872 return 0;
9876 /* Return true if REGNO is used by the frame unwinder. */
9879 ia64_eh_uses (int regno)
9881 unsigned int r;
9883 if (! reload_completed)
9884 return 0;
9886 if (regno == 0)
9887 return 0;
9889 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9890 if (regno == current_frame_info.r[r]
9891 || regno == emitted_frame_related_regs[r])
9892 return 1;
9894 return 0;
9897 /* Return true if this goes in small data/bss. */
9899 /* ??? We could also support our own long data here, generating movl/add/ld8
9900 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9901 code faster because there is one less load. This would also cover incomplete
9902 types which can't go in sdata/sbss. */
9904 static bool
9905 ia64_in_small_data_p (const_tree exp)
9907 if (TARGET_NO_SDATA)
9908 return false;
9910 /* We want to merge strings, so we never consider them small data. */
9911 if (TREE_CODE (exp) == STRING_CST)
9912 return false;
9914 /* Functions are never small data. */
9915 if (TREE_CODE (exp) == FUNCTION_DECL)
9916 return false;
9918 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9920 const char *section = DECL_SECTION_NAME (exp);
9922 if (strcmp (section, ".sdata") == 0
9923 || strncmp (section, ".sdata.", 7) == 0
9924 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9925 || strcmp (section, ".sbss") == 0
9926 || strncmp (section, ".sbss.", 6) == 0
9927 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9928 return true;
9930 else
9932 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9934 /* If this is an incomplete type with size 0, then we can't put it
9935 in sdata because it might be too big when completed. */
9936 if (size > 0 && size <= ia64_section_threshold)
9937 return true;
9940 return false;
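/* Illustrative example: an object below the threshold, say

     static int counter;

   is placed in .sbss and can be addressed gp-relative,

     addl r14 = @gprel(counter), gp
     ;;
     ld4 r15 = [r14]

   whereas a larger object goes through the linkage table (addl
   @ltoff, ld8, then the data load), costing the extra load mentioned
   in the ??? comment above.  Register numbers are arbitrary.  */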
9943 /* Output assembly directives for prologue regions. */
9945 /* True if the current basic block is the last block of the function. */
9947 static bool last_block;
9949 /* True if we need a copy_state command at the start of the next block. */
9951 static bool need_copy_state;
9953 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9954 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9955 #endif
9957 /* The function emits unwind directives for the start of an epilogue. */
9959 static void
9960 process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
9961 bool unwind, bool frame ATTRIBUTE_UNUSED)
9963 /* If this isn't the last block of the function, then we need to label the
9964 current state, and copy it back in at the start of the next block. */
9966 if (!last_block)
9968 if (unwind)
9969 fprintf (asm_out_file, "\t.label_state %d\n",
9970 ++cfun->machine->state_num);
9971 need_copy_state = true;
9974 if (unwind)
9975 fprintf (asm_out_file, "\t.restore sp\n");
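/* For illustration, for an epilogue in the middle of a function the
   directives emitted here and by the copy_state handling in
   ia64_asm_unwind_emit combine to roughly

     .label_state 1
     .restore sp
       ... epilogue insns and return ...
     .body
     .copy_state 1

   with the last two lines at the start of the following block.  For
   the final block only ".restore sp" is emitted.  */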
9978 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9980 static void
9981 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9982 bool unwind, bool frame)
9984 rtx dest = SET_DEST (pat);
9985 rtx src = SET_SRC (pat);
9987 if (dest == stack_pointer_rtx)
9989 if (GET_CODE (src) == PLUS)
9991 rtx op0 = XEXP (src, 0);
9992 rtx op1 = XEXP (src, 1);
9994 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9996 if (INTVAL (op1) < 0)
9998 gcc_assert (!frame_pointer_needed);
9999 if (unwind)
10000 fprintf (asm_out_file,
10001 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
10002 -INTVAL (op1));
10004 else
10005 process_epilogue (asm_out_file, insn, unwind, frame);
10007 else
10009 gcc_assert (src == hard_frame_pointer_rtx);
10010 process_epilogue (asm_out_file, insn, unwind, frame);
10013 else if (dest == hard_frame_pointer_rtx)
10015 gcc_assert (src == stack_pointer_rtx);
10016 gcc_assert (frame_pointer_needed);
10018 if (unwind)
10019 fprintf (asm_out_file, "\t.vframe r%d\n",
10020 ia64_dbx_register_number (REGNO (dest)));
10022 else
10023 gcc_unreachable ();
10026 /* This function processes a SET pattern for REG_CFA_REGISTER. */
10028 static void
10029 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
10031 rtx dest = SET_DEST (pat);
10032 rtx src = SET_SRC (pat);
10033 int dest_regno = REGNO (dest);
10034 int src_regno;
10036 if (src == pc_rtx)
10038 /* Saving return address pointer. */
10039 if (unwind)
10040 fprintf (asm_out_file, "\t.save rp, r%d\n",
10041 ia64_dbx_register_number (dest_regno));
10042 return;
10045 src_regno = REGNO (src);
10047 switch (src_regno)
10049 case PR_REG (0):
10050 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
10051 if (unwind)
10052 fprintf (asm_out_file, "\t.save pr, r%d\n",
10053 ia64_dbx_register_number (dest_regno));
10054 break;
10056 case AR_UNAT_REGNUM:
10057 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
10058 if (unwind)
10059 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
10060 ia64_dbx_register_number (dest_regno));
10061 break;
10063 case AR_LC_REGNUM:
10064 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
10065 if (unwind)
10066 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
10067 ia64_dbx_register_number (dest_regno));
10068 break;
10070 default:
10071 /* Everything else should indicate being stored to memory. */
10072 gcc_unreachable ();
10076 /* This function processes a SET pattern for REG_CFA_OFFSET. */
10078 static void
10079 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
10081 rtx dest = SET_DEST (pat);
10082 rtx src = SET_SRC (pat);
10083 int src_regno = REGNO (src);
10084 const char *saveop;
10085 HOST_WIDE_INT off;
10086 rtx base;
10088 gcc_assert (MEM_P (dest));
10089 if (GET_CODE (XEXP (dest, 0)) == REG)
10091 base = XEXP (dest, 0);
10092 off = 0;
10094 else
10096 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
10097 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
10098 base = XEXP (XEXP (dest, 0), 0);
10099 off = INTVAL (XEXP (XEXP (dest, 0), 1));
10102 if (base == hard_frame_pointer_rtx)
10104 saveop = ".savepsp";
10105 off = - off;
10107 else
10109 gcc_assert (base == stack_pointer_rtx);
10110 saveop = ".savesp";
10113 src_regno = REGNO (src);
10114 switch (src_regno)
10116 case BR_REG (0):
10117 gcc_assert (!current_frame_info.r[reg_save_b0]);
10118 if (unwind)
10119 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
10120 saveop, off);
10121 break;
10123 case PR_REG (0):
10124 gcc_assert (!current_frame_info.r[reg_save_pr]);
10125 if (unwind)
10126 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
10127 saveop, off);
10128 break;
10130 case AR_LC_REGNUM:
10131 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
10132 if (unwind)
10133 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
10134 saveop, off);
10135 break;
10137 case AR_PFS_REGNUM:
10138 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
10139 if (unwind)
10140 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
10141 saveop, off);
10142 break;
10144 case AR_UNAT_REGNUM:
10145 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
10146 if (unwind)
10147 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
10148 saveop, off);
10149 break;
10151 case GR_REG (4):
10152 case GR_REG (5):
10153 case GR_REG (6):
10154 case GR_REG (7):
10155 if (unwind)
10156 fprintf (asm_out_file, "\t.save.g 0x%x\n",
10157 1 << (src_regno - GR_REG (4)));
10158 break;
10160 case BR_REG (1):
10161 case BR_REG (2):
10162 case BR_REG (3):
10163 case BR_REG (4):
10164 case BR_REG (5):
10165 if (unwind)
10166 fprintf (asm_out_file, "\t.save.b 0x%x\n",
10167 1 << (src_regno - BR_REG (1)));
10168 break;
10170 case FR_REG (2):
10171 case FR_REG (3):
10172 case FR_REG (4):
10173 case FR_REG (5):
10174 if (unwind)
10175 fprintf (asm_out_file, "\t.save.f 0x%x\n",
10176 1 << (src_regno - FR_REG (2)));
10177 break;
10179 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
10180 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
10181 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
10182 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
10183 if (unwind)
10184 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
10185 1 << (src_regno - FR_REG (12)));
10186 break;
10188 default:
10189 /* ??? For some reason we mark other general registers, even those
10190 we can't represent in the unwind info. Ignore them. */
10191 break;
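/* Putting the process_cfa_* routines together, a typical prologue is
   annotated roughly as follows (register numbers and frame size are
   made up):

     .prologue
     .save ar.pfs, r35
     alloc r35 = ar.pfs, 2, 2, 0, 0
     .fframe 32
     adds r12 = -32, r12
     .save rp, r34
     mov r34 = b0
     .body

   Registers spilled to the memory stack instead produce the .savesp /
   .savepsp forms with a slot offset, as emitted above.  */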
10195 /* This function looks at a single insn and emits any directives
10196 required to unwind this insn. */
10198 static void
10199 ia64_asm_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
10201 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10202 bool frame = dwarf2out_do_frame ();
10203 rtx note, pat;
10204 bool handled_one;
10206 if (!unwind && !frame)
10207 return;
10209 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10211 last_block = NOTE_BASIC_BLOCK (insn)->next_bb
10212 == EXIT_BLOCK_PTR_FOR_FN (cfun);
10214 /* Restore unwind state from immediately before the epilogue. */
10215 if (need_copy_state)
10217 if (unwind)
10219 fprintf (asm_out_file, "\t.body\n");
10220 fprintf (asm_out_file, "\t.copy_state %d\n",
10221 cfun->machine->state_num);
10223 need_copy_state = false;
10227 if (NOTE_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10228 return;
10230 /* Look for the ALLOC insn. */
10231 if (INSN_CODE (insn) == CODE_FOR_alloc)
10233 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10234 int dest_regno = REGNO (dest);
10236 /* If this is the final destination for ar.pfs, then this must
10237 be the alloc in the prologue. */
10238 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10240 if (unwind)
10241 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10242 ia64_dbx_register_number (dest_regno));
10244 else
10246 /* This must be an alloc before a sibcall. We must drop the
10247 old frame info. The easiest way to drop the old frame
10248 info is to ensure we had a ".restore sp" directive
10249 followed by a new prologue. If the procedure doesn't
10250 have a memory-stack frame, we'll issue a dummy ".restore
10251 sp" now. */
10252 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10253 /* If we haven't done process_epilogue () yet, do it now. */
10254 process_epilogue (asm_out_file, insn, unwind, frame);
10255 if (unwind)
10256 fprintf (asm_out_file, "\t.prologue\n");
10258 return;
10261 handled_one = false;
10262 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10263 switch (REG_NOTE_KIND (note))
10265 case REG_CFA_ADJUST_CFA:
10266 pat = XEXP (note, 0);
10267 if (pat == NULL)
10268 pat = PATTERN (insn);
10269 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10270 handled_one = true;
10271 break;
10273 case REG_CFA_OFFSET:
10274 pat = XEXP (note, 0);
10275 if (pat == NULL)
10276 pat = PATTERN (insn);
10277 process_cfa_offset (asm_out_file, pat, unwind);
10278 handled_one = true;
10279 break;
10281 case REG_CFA_REGISTER:
10282 pat = XEXP (note, 0);
10283 if (pat == NULL)
10284 pat = PATTERN (insn);
10285 process_cfa_register (asm_out_file, pat, unwind);
10286 handled_one = true;
10287 break;
10289 case REG_FRAME_RELATED_EXPR:
10290 case REG_CFA_DEF_CFA:
10291 case REG_CFA_EXPRESSION:
10292 case REG_CFA_RESTORE:
10293 case REG_CFA_SET_VDRAP:
10294 /* Not used in the ia64 port. */
10295 gcc_unreachable ();
10297 default:
10298 /* Not a frame-related note. */
10299 break;
10302 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10303 explicit action to take. No guessing required. */
10304 gcc_assert (handled_one);
10307 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10309 static void
10310 ia64_asm_emit_except_personality (rtx personality)
10312 fputs ("\t.personality\t", asm_out_file);
10313 output_addr_const (asm_out_file, personality);
10314 fputc ('\n', asm_out_file);
10317 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10319 static void
10320 ia64_asm_init_sections (void)
10322 exception_section = get_unnamed_section (0, output_section_asm_op,
10323 "\t.handlerdata");
10326 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10328 static enum unwind_info_type
10329 ia64_debug_unwind_info (void)
10331 return UI_TARGET;
10334 enum ia64_builtins
10336 IA64_BUILTIN_BSP,
10337 IA64_BUILTIN_COPYSIGNQ,
10338 IA64_BUILTIN_FABSQ,
10339 IA64_BUILTIN_FLUSHRS,
10340 IA64_BUILTIN_INFQ,
10341 IA64_BUILTIN_HUGE_VALQ,
10342 IA64_BUILTIN_max
10345 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10347 void
10348 ia64_init_builtins (void)
10350 tree fpreg_type;
10351 tree float80_type;
10352 tree decl;
10354 /* The __fpreg type. */
10355 fpreg_type = make_node (REAL_TYPE);
10356 TYPE_PRECISION (fpreg_type) = 82;
10357 layout_type (fpreg_type);
10358 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10360 /* The __float80 type. */
10361 float80_type = make_node (REAL_TYPE);
10362 TYPE_PRECISION (float80_type) = 80;
10363 layout_type (float80_type);
10364 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10366 /* The __float128 type. */
10367 if (!TARGET_HPUX)
10369 tree ftype;
10370 tree float128_type = make_node (REAL_TYPE);
10372 TYPE_PRECISION (float128_type) = 128;
10373 layout_type (float128_type);
10374 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10376 /* TFmode support builtins. */
10377 ftype = build_function_type_list (float128_type, NULL_TREE);
10378 decl = add_builtin_function ("__builtin_infq", ftype,
10379 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10380 NULL, NULL_TREE);
10381 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10383 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10384 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10385 NULL, NULL_TREE);
10386 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10388 ftype = build_function_type_list (float128_type,
10389 float128_type,
10390 NULL_TREE);
10391 decl = add_builtin_function ("__builtin_fabsq", ftype,
10392 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10393 "__fabstf2", NULL_TREE);
10394 TREE_READONLY (decl) = 1;
10395 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10397 ftype = build_function_type_list (float128_type,
10398 float128_type,
10399 float128_type,
10400 NULL_TREE);
10401 decl = add_builtin_function ("__builtin_copysignq", ftype,
10402 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10403 "__copysigntf3", NULL_TREE);
10404 TREE_READONLY (decl) = 1;
10405 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10407 else
10408 /* Under HPUX, this is a synonym for "long double". */
10409 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10410 "__float128");
10412 /* Fwrite on VMS is non-standard. */
10413 #if TARGET_ABI_OPEN_VMS
10414 vms_patch_builtins ();
10415 #endif
10417 #define def_builtin(name, type, code) \
10418 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10419 NULL, NULL_TREE)
10421 decl = def_builtin ("__builtin_ia64_bsp",
10422 build_function_type_list (ptr_type_node, NULL_TREE),
10423 IA64_BUILTIN_BSP);
10424 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10426 decl = def_builtin ("__builtin_ia64_flushrs",
10427 build_function_type_list (void_type_node, NULL_TREE),
10428 IA64_BUILTIN_FLUSHRS);
10429 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10431 #undef def_builtin
10433 if (TARGET_HPUX)
10435 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
10436 set_user_assembler_name (decl, "_Isfinite");
10437 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
10438 set_user_assembler_name (decl, "_Isfinitef");
10439 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
10440 set_user_assembler_name (decl, "_Isfinitef128");
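/* Illustrative use of the types and builtins registered above, for
   targets where __float128 is a distinct type:

     __float80  e = 1.0;                        // XFmode
     __float128 q = __builtin_huge_valq ();     // TFmode
     q = __builtin_copysignq (__builtin_fabsq (q), (__float128) -1.0);
     void *bsp = __builtin_ia64_bsp ();         // current backing store

   Variable names are arbitrary.  */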
10445 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10446 machine_mode mode ATTRIBUTE_UNUSED,
10447 int ignore ATTRIBUTE_UNUSED)
10449 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10450 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10452 switch (fcode)
10454 case IA64_BUILTIN_BSP:
10455 if (! target || ! register_operand (target, DImode))
10456 target = gen_reg_rtx (DImode);
10457 emit_insn (gen_bsp_value (target));
10458 #ifdef POINTERS_EXTEND_UNSIGNED
10459 target = convert_memory_address (ptr_mode, target);
10460 #endif
10461 return target;
10463 case IA64_BUILTIN_FLUSHRS:
10464 emit_insn (gen_flushrs ());
10465 return const0_rtx;
10467 case IA64_BUILTIN_INFQ:
10468 case IA64_BUILTIN_HUGE_VALQ:
10470 machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10471 REAL_VALUE_TYPE inf;
10472 rtx tmp;
10474 real_inf (&inf);
10475 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10477 tmp = validize_mem (force_const_mem (target_mode, tmp));
10479 if (target == 0)
10480 target = gen_reg_rtx (target_mode);
10482 emit_move_insn (target, tmp);
10483 return target;
10486 case IA64_BUILTIN_FABSQ:
10487 case IA64_BUILTIN_COPYSIGNQ:
10488 return expand_call (exp, target, ignore);
10490 default:
10491 gcc_unreachable ();
10494 return NULL_RTX;
10497 /* Return the ia64 builtin for CODE. */
10499 static tree
10500 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10502 if (code >= IA64_BUILTIN_max)
10503 return error_mark_node;
10505 return ia64_builtins[code];
10508 /* On HP-UX IA64, aggregate parameters are passed in the most
10509 significant bits of the stack slot. */
10511 enum direction
10512 ia64_hpux_function_arg_padding (machine_mode mode, const_tree type)
10514 /* Exception to normal case for structures/unions/etc. */
10516 if (type && AGGREGATE_TYPE_P (type)
10517 && int_size_in_bytes (type) < UNITS_PER_WORD)
10518 return upward;
10520 /* Fall back to the default. */
10521 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
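/* Worked example: with UNITS_PER_WORD == 8, a 3-byte struct argument
   passed on the HP-UX stack is placed in the most significant bytes
   of its 8-byte slot (the `upward' result above); anything else falls
   back to DEFAULT_FUNCTION_ARG_PADDING.  */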
10524 /* Emit text to declare externally defined variables and functions, because
10525 the Intel assembler does not support undefined externals. */
10527 void
10528 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10530 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10531 set in order to avoid putting out names that are never really
10532 used. */
10533 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10535 /* maybe_assemble_visibility will return 1 if the assembler
10536 visibility directive is output. */
10537 int need_visibility = ((*targetm.binds_local_p) (decl)
10538 && maybe_assemble_visibility (decl));
10540 /* GNU as does not need anything here, but the HP linker does
10541 need something for external functions. */
10542 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10543 && TREE_CODE (decl) == FUNCTION_DECL)
10544 (*targetm.asm_out.globalize_decl_name) (file, decl);
10545 else if (need_visibility && !TARGET_GNU_AS)
10546 (*targetm.asm_out.globalize_label) (file, name);
10550 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
10551 modes of word_mode and larger. Rename the TFmode libfuncs using the
10552 HPUX conventions. __divtf3 is used for XFmode; we need to keep it for
10553 backward compatibility. */
10555 static void
10556 ia64_init_libfuncs (void)
10558 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10559 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10560 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10561 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10563 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10564 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10565 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10566 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10567 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10569 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10570 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10571 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10572 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10573 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10574 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10576 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10577 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10578 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10579 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10580 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10582 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10583 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10584 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10585 /* HP-UX 11.23 libc does not have a function for unsigned
10586 SImode-to-TFmode conversion. */
10587 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
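/* For illustration: after the renaming above, a quad-precision
   division such as

     __float128 f (__float128 a, __float128 b) { return a / b; }

   calls _U_Qfdiv instead of the default __divtf3, and a conversion
   from __float128 to double calls _U_Qfcnvff_quad_to_dbl.  */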
10590 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10592 static void
10593 ia64_hpux_init_libfuncs (void)
10595 ia64_init_libfuncs ();
10597 /* The HP SI millicode division and mod functions expect DI arguments.
10598 By turning them off completely we avoid using both libgcc and the
10599 non-standard millicode routines and use the HP DI millicode routines
10600 instead. */
10602 set_optab_libfunc (sdiv_optab, SImode, 0);
10603 set_optab_libfunc (udiv_optab, SImode, 0);
10604 set_optab_libfunc (smod_optab, SImode, 0);
10605 set_optab_libfunc (umod_optab, SImode, 0);
10607 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10608 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10609 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10610 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10612 /* HP-UX libc has TF min/max/abs routines in it. */
10613 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10614 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10615 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10617 /* ia64_expand_compare uses this. */
10618 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10620 /* These should never be used. */
10621 set_optab_libfunc (eq_optab, TFmode, 0);
10622 set_optab_libfunc (ne_optab, TFmode, 0);
10623 set_optab_libfunc (gt_optab, TFmode, 0);
10624 set_optab_libfunc (ge_optab, TFmode, 0);
10625 set_optab_libfunc (lt_optab, TFmode, 0);
10626 set_optab_libfunc (le_optab, TFmode, 0);
10629 /* Rename the division and modulus functions in VMS. */
10631 static void
10632 ia64_vms_init_libfuncs (void)
10634 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10635 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10636 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10637 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10638 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10639 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10640 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10641 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10642 abort_libfunc = init_one_libfunc ("decc$abort");
10643 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10644 #ifdef MEM_LIBFUNCS_INIT
10645 MEM_LIBFUNCS_INIT;
10646 #endif
10649 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10650 the HPUX conventions. */
10652 static void
10653 ia64_sysv4_init_libfuncs (void)
10655 ia64_init_libfuncs ();
10657 /* These functions are not part of the HPUX TFmode interface. We
10658 use them instead of _U_Qfcmp, which doesn't work the way we
10659 expect. */
10660 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10661 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10662 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10663 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10664 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10665 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10667 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10668 glibc doesn't have them. */
10671 /* Use soft-fp. */
10673 static void
10674 ia64_soft_fp_init_libfuncs (void)
10678 static bool
10679 ia64_vms_valid_pointer_mode (machine_mode mode)
10681 return (mode == SImode || mode == DImode);
10684 /* For HPUX, it is illegal to have relocations in shared segments. */
10686 static int
10687 ia64_hpux_reloc_rw_mask (void)
10689 return 3;
10692 /* For others, relax this so that relocations to local data go in
10693 read-only segments, but we still cannot allow global relocations
10694 in read-only segments. */
10696 static int
10697 ia64_reloc_rw_mask (void)
10699 return flag_pic ? 3 : 2;
10702 /* Return the section to use for X. The only special thing we do here
10703 is to honor small data. */
10705 static section *
10706 ia64_select_rtx_section (machine_mode mode, rtx x,
10707 unsigned HOST_WIDE_INT align)
10709 if (GET_MODE_SIZE (mode) > 0
10710 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10711 && !TARGET_NO_SDATA)
10712 return sdata_section;
10713 else
10714 return default_elf_select_rtx_section (mode, x, align);
10717 static unsigned int
10718 ia64_section_type_flags (tree decl, const char *name, int reloc)
10720 unsigned int flags = 0;
10722 if (strcmp (name, ".sdata") == 0
10723 || strncmp (name, ".sdata.", 7) == 0
10724 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10725 || strncmp (name, ".sdata2.", 8) == 0
10726 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10727 || strcmp (name, ".sbss") == 0
10728 || strncmp (name, ".sbss.", 6) == 0
10729 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10730 flags = SECTION_SMALL;
10732 flags |= default_section_type_flags (decl, name, reloc);
10733 return flags;
10736 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10737 structure type and that the address of that type should be passed
10738 in out0, rather than in r8. */
10740 static bool
10741 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10743 tree ret_type = TREE_TYPE (fntype);
10745 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10746 as the structure return address parameter, if the return value
10747 type has a non-trivial copy constructor or destructor. It is not
10748 clear if this same convention should be used for other
10749 programming languages. Until G++ 3.4, we incorrectly used r8 for
10750 these return values. */
10751 return (abi_version_at_least (2)
10752 && ret_type
10753 && TYPE_MODE (ret_type) == BLKmode
10754 && TREE_ADDRESSABLE (ret_type)
10755 && strcmp (lang_hooks.name, "GNU C++") == 0);
10758 /* Output the assembler code for a thunk function. THUNK_DECL is the
10759 declaration for the thunk function itself, FUNCTION is the decl for
10760 the target function. DELTA is an immediate constant offset to be
10761 added to THIS. If VCALL_OFFSET is nonzero, the word at
10762 *(*this + vcall_offset) should be added to THIS. */
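/* In C terms the generated thunk behaves roughly like this sketch
   (illustrative only):

     this = (char *) this + delta;
     if (vcall_offset)
       this = (char *) this
              + *(long *) (*(char **) this + vcall_offset);
     return function (this, ...);   // emitted as a sibcall

   i.e. a constant adjustment, an optional vtable-relative adjustment,
   and a tail call to FUNCTION.  */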
10764 static void
10765 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10766 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10767 tree function)
10769 rtx this_rtx, funexp;
10770 rtx_insn *insn;
10771 unsigned int this_parmno;
10772 unsigned int this_regno;
10773 rtx delta_rtx;
10775 reload_completed = 1;
10776 epilogue_completed = 1;
10778 /* Set things up as ia64_expand_prologue might. */
10779 last_scratch_gr_reg = 15;
10781 memset (&current_frame_info, 0, sizeof (current_frame_info));
10782 current_frame_info.spill_cfa_off = -16;
10783 current_frame_info.n_input_regs = 1;
10784 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10786 /* Mark the end of the (empty) prologue. */
10787 emit_note (NOTE_INSN_PROLOGUE_END);
10789 /* Figure out whether "this" will be the first parameter (the
10790 typical case) or the second parameter (as happens when the
10791 virtual function returns certain class objects). */
10792 this_parmno
10793 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10794 ? 1 : 0);
10795 this_regno = IN_REG (this_parmno);
10796 if (!TARGET_REG_NAMES)
10797 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10799 this_rtx = gen_rtx_REG (Pmode, this_regno);
10801 /* Apply the constant offset, if required. */
10802 delta_rtx = GEN_INT (delta);
10803 if (TARGET_ILP32)
10805 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10806 REG_POINTER (tmp) = 1;
10807 if (delta && satisfies_constraint_I (delta_rtx))
10809 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10810 delta = 0;
10812 else
10813 emit_insn (gen_ptr_extend (this_rtx, tmp));
10815 if (delta)
10817 if (!satisfies_constraint_I (delta_rtx))
10819 rtx tmp = gen_rtx_REG (Pmode, 2);
10820 emit_move_insn (tmp, delta_rtx);
10821 delta_rtx = tmp;
10823 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10826 /* Apply the offset from the vtable, if required. */
10827 if (vcall_offset)
10829 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10830 rtx tmp = gen_rtx_REG (Pmode, 2);
10832 if (TARGET_ILP32)
10834 rtx t = gen_rtx_REG (ptr_mode, 2);
10835 REG_POINTER (t) = 1;
10836 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10837 if (satisfies_constraint_I (vcall_offset_rtx))
10839 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10840 vcall_offset = 0;
10842 else
10843 emit_insn (gen_ptr_extend (tmp, t));
10845 else
10846 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10848 if (vcall_offset)
10850 if (!satisfies_constraint_J (vcall_offset_rtx))
10852 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10853 emit_move_insn (tmp2, vcall_offset_rtx);
10854 vcall_offset_rtx = tmp2;
10856 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10859 if (TARGET_ILP32)
10860 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10861 else
10862 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10864 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10867 /* Generate a tail call to the target function. */
10868 if (! TREE_USED (function))
10870 assemble_external (function);
10871 TREE_USED (function) = 1;
10873 funexp = XEXP (DECL_RTL (function), 0);
10874 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10875 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10876 insn = get_last_insn ();
10877 SIBLING_CALL_P (insn) = 1;
10879 /* Code generation for calls relies on splitting. */
10880 reload_completed = 1;
10881 epilogue_completed = 1;
10882 try_split (PATTERN (insn), insn, 0);
10884 emit_barrier ();
10886 /* Run just enough of rest_of_compilation to get the insns emitted.
10887 There's not really enough bulk here to make other passes such as
10888 instruction scheduling worthwhile. Note that use_thunk calls
10889 assemble_start_function and assemble_end_function. */
10891 emit_all_insn_group_barriers (NULL);
10892 insn = get_insns ();
10893 shorten_branches (insn);
10894 final_start_function (insn, file, 1);
10895 final (insn, file, 1);
10896 final_end_function ();
10898 reload_completed = 0;
10899 epilogue_completed = 0;
10902 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10904 static rtx
10905 ia64_struct_value_rtx (tree fntype,
10906 int incoming ATTRIBUTE_UNUSED)
10908 if (TARGET_ABI_OPEN_VMS ||
10909 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10910 return NULL_RTX;
10911 return gen_rtx_REG (Pmode, GR_REG (8));
10914 static bool
10915 ia64_scalar_mode_supported_p (machine_mode mode)
10917 switch (mode)
10919 case QImode:
10920 case HImode:
10921 case SImode:
10922 case DImode:
10923 case TImode:
10924 return true;
10926 case SFmode:
10927 case DFmode:
10928 case XFmode:
10929 case RFmode:
10930 return true;
10932 case TFmode:
10933 return true;
10935 default:
10936 return false;
10940 static bool
10941 ia64_vector_mode_supported_p (machine_mode mode)
10943 switch (mode)
10945 case V8QImode:
10946 case V4HImode:
10947 case V2SImode:
10948 return true;
10950 case V2SFmode:
10951 return true;
10953 default:
10954 return false;
10958 /* Implement TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P. */
10960 static bool
10961 ia64_libgcc_floating_mode_supported_p (machine_mode mode)
10963 switch (mode)
10965 case SFmode:
10966 case DFmode:
10967 return true;
10969 case XFmode:
10970 #ifdef IA64_NO_LIBGCC_XFMODE
10971 return false;
10972 #else
10973 return true;
10974 #endif
10976 case TFmode:
10977 #ifdef IA64_NO_LIBGCC_TFMODE
10978 return false;
10979 #else
10980 return true;
10981 #endif
10983 default:
10984 return false;
10988 /* Implement the FUNCTION_PROFILER macro. */
10990 void
10991 ia64_output_function_profiler (FILE *file, int labelno)
10993 bool indirect_call;
10995 /* If the function needs a static chain and the static chain
10996 register is r15, we use an indirect call so as to bypass
10997 the PLT stub in case the executable is dynamically linked,
10998 because the stub clobbers r15 as per 5.3.6 of the psABI.
11001 We don't need to do that in non-canonical (auto-PIC or no-PIC) mode. */
11001 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
11003 gcc_assert (STATIC_CHAIN_REGNUM == 15);
11004 indirect_call = true;
11006 else
11007 indirect_call = false;
11009 if (TARGET_GNU_AS)
11010 fputs ("\t.prologue 4, r40\n", file);
11011 else
11012 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
11013 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
11015 if (NO_PROFILE_COUNTERS)
11016 fputs ("\tmov out3 = r0\n", file);
11017 else
11019 char buf[20];
11020 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11022 if (TARGET_AUTO_PIC)
11023 fputs ("\tmovl out3 = @gprel(", file);
11024 else
11025 fputs ("\taddl out3 = @ltoff(", file);
11026 assemble_name (file, buf);
11027 if (TARGET_AUTO_PIC)
11028 fputs (")\n", file);
11029 else
11030 fputs ("), r1\n", file);
11033 if (indirect_call)
11034 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
11035 fputs ("\t;;\n", file);
11037 fputs ("\t.save rp, r42\n", file);
11038 fputs ("\tmov out2 = b0\n", file);
11039 if (indirect_call)
11040 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
11041 fputs ("\t.body\n", file);
11042 fputs ("\tmov out1 = r1\n", file);
11043 if (indirect_call)
11045 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
11046 fputs ("\tmov b6 = r16\n", file);
11047 fputs ("\tld8 r1 = [r14]\n", file);
11048 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
11050 else
11051 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
11054 static GTY(()) rtx mcount_func_rtx;
11055 static rtx
11056 gen_mcount_func_rtx (void)
11058 if (!mcount_func_rtx)
11059 mcount_func_rtx = init_one_libfunc ("_mcount");
11060 return mcount_func_rtx;
11063 void
11064 ia64_profile_hook (int labelno)
11066 rtx label, ip;
11068 if (NO_PROFILE_COUNTERS)
11069 label = const0_rtx;
11070 else
11072 char buf[30];
11073 const char *label_name;
11074 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
11075 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
11076 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
11077 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
11079 ip = gen_reg_rtx (Pmode);
11080 emit_insn (gen_ip_value (ip));
11081 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
11082 VOIDmode, 3,
11083 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
11084 ip, Pmode,
11085 label, Pmode);
11088 /* Return the mangling of TYPE if it is an extended fundamental type. */
11090 static const char *
11091 ia64_mangle_type (const_tree type)
11093 type = TYPE_MAIN_VARIANT (type);
11095 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
11096 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
11097 return NULL;
11099 /* On HP-UX, "long double" is mangled as "e", so __float128 (a synonym
11100 for it there) is already mangled as "e"; only return "g" elsewhere. */
11101 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
11102 return "g";
11103 /* On HP-UX, "e" is not available as a mangling of __float80 so use
11104 an extended mangling. Elsewhere, "e" is available since long
11105 double is 80 bits. */
11106 if (TYPE_MODE (type) == XFmode)
11107 return TARGET_HPUX ? "u9__float80" : "e";
11108 if (TYPE_MODE (type) == RFmode)
11109 return "u7__fpreg";
11110 return NULL;
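/* Illustrative manglings produced by the hook above when compiling
   C++ (function names are arbitrary):

     void f (long double);   // _Z1fe              (XFmode, non-HP-UX)
     void g (__float80);     // _Z1gu9__float80    (HP-UX)
     void h (__fpreg);       // _Z1hu7__fpreg
     void i (__float128);    // _Z1ig              (TFmode, non-HP-UX)  */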
11113 /* Return the diagnostic message string if conversion from FROMTYPE to
11114 TOTYPE is not allowed, NULL otherwise. */
11115 static const char *
11116 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
11118 /* Reject nontrivial conversion to or from __fpreg. */
11119 if (TYPE_MODE (fromtype) == RFmode
11120 && TYPE_MODE (totype) != RFmode
11121 && TYPE_MODE (totype) != VOIDmode)
11122 return N_("invalid conversion from %<__fpreg%>");
11123 if (TYPE_MODE (totype) == RFmode
11124 && TYPE_MODE (fromtype) != RFmode)
11125 return N_("invalid conversion to %<__fpreg%>");
11126 return NULL;
11129 /* Return the diagnostic message string if the unary operation OP is
11130 not permitted on TYPE, NULL otherwise. */
11131 static const char *
11132 ia64_invalid_unary_op (int op, const_tree type)
11134 /* Reject operations on __fpreg other than unary + or &. */
11135 if (TYPE_MODE (type) == RFmode
11136 && op != CONVERT_EXPR
11137 && op != ADDR_EXPR)
11138 return N_("invalid operation on %<__fpreg%>");
11139 return NULL;
11142 /* Return the diagnostic message string if the binary operation OP is
11143 not permitted on TYPE1 and TYPE2, NULL otherwise. */
11144 static const char *
11145 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
11147 /* Reject operations on __fpreg. */
11148 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
11149 return N_("invalid operation on %<__fpreg%>");
11150 return NULL;
11153 /* HP-UX version_id attribute.
11154 For object foo, if the version_id is set to 1234, put out an alias
11155 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
11156 other than an alias statement, because it is an illegal symbol name. */
11158 static tree
11159 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
11160 tree name ATTRIBUTE_UNUSED,
11161 tree args,
11162 int flags ATTRIBUTE_UNUSED,
11163 bool *no_add_attrs)
11165 tree arg = TREE_VALUE (args);
11167 if (TREE_CODE (arg) != STRING_CST)
11169 error ("version attribute is not a string");
11170 *no_add_attrs = true;
11171 return NULL_TREE;
11173 return NULL_TREE;
11176 /* Target hook for c_mode_for_suffix. */
11178 static machine_mode
11179 ia64_c_mode_for_suffix (char suffix)
11181 if (suffix == 'q')
11182 return TFmode;
11183 if (suffix == 'w')
11184 return XFmode;
11186 return VOIDmode;
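/* As a consequence, in GNU C on this target the constant 1.5q has
   mode TFmode (__float128) and 1.5w has mode XFmode (__float80).  */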
11189 static GTY(()) rtx ia64_dconst_0_5_rtx;
11192 ia64_dconst_0_5 (void)
11194 if (! ia64_dconst_0_5_rtx)
11196 REAL_VALUE_TYPE rv;
11197 real_from_string (&rv, "0.5");
11198 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11200 return ia64_dconst_0_5_rtx;
11203 static GTY(()) rtx ia64_dconst_0_375_rtx;
11206 ia64_dconst_0_375 (void)
11208 if (! ia64_dconst_0_375_rtx)
11210 REAL_VALUE_TYPE rv;
11211 real_from_string (&rv, "0.375");
11212 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11214 return ia64_dconst_0_375_rtx;
11217 static machine_mode
11218 ia64_get_reg_raw_mode (int regno)
11220 if (FR_REGNO_P (regno))
11221 return XFmode;
11222 return default_get_reg_raw_mode (regno);
11225 /* Implement TARGET_MEMBER_TYPE_FORCES_BLK. ??? Might not be needed
11226 anymore. */
11228 bool
11229 ia64_member_type_forces_blk (const_tree, machine_mode mode)
11231 return TARGET_HPUX && mode == TFmode;
11234 /* Always default to .text section until HP-UX linker is fixed. */
11236 ATTRIBUTE_UNUSED static section *
11237 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11238 enum node_frequency freq ATTRIBUTE_UNUSED,
11239 bool startup ATTRIBUTE_UNUSED,
11240 bool exit ATTRIBUTE_UNUSED)
11242 return NULL;
11245 /* Construct (set target (vec_select op0 (parallel perm))) and
11246 return true if that's a valid instruction in the active ISA. */
11248 static bool
11249 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
11251 rtx rperm[MAX_VECT_LEN], x;
11252 unsigned i;
11254 for (i = 0; i < nelt; ++i)
11255 rperm[i] = GEN_INT (perm[i]);
11257 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
11258 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
11259 x = gen_rtx_SET (VOIDmode, target, x);
11261 rtx_insn *insn = emit_insn (x);
11262 if (recog_memoized (insn) < 0)
11264 remove_insn (insn);
11265 return false;
11267 return true;
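/* For example (illustrative), reversing a V4HI vector builds

     (set (reg:V4HI t)
          (vec_select:V4HI (reg:V4HI x)
                           (parallel [(const_int 3) (const_int 2)
                                      (const_int 1) (const_int 0)])))

   and the insn is kept only if recog can match it against one of the
   mux-style patterns in the vector machine description.  */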
11270 /* Similar, but generate a vec_concat from op0 and op1 as well. */
11272 static bool
11273 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
11274 const unsigned char *perm, unsigned nelt)
11276 machine_mode v2mode;
11277 rtx x;
11279 v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
11280 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
11281 return expand_vselect (target, x, perm, nelt);
11284 /* Try to expand a no-op permutation. */
11286 static bool
11287 expand_vec_perm_identity (struct expand_vec_perm_d *d)
11289 unsigned i, nelt = d->nelt;
11291 for (i = 0; i < nelt; ++i)
11292 if (d->perm[i] != i)
11293 return false;
11295 if (!d->testing_p)
11296 emit_move_insn (d->target, d->op0);
11298 return true;
11301 /* Try to expand D via a shrp instruction. */
11303 static bool
11304 expand_vec_perm_shrp (struct expand_vec_perm_d *d)
11306 unsigned i, nelt = d->nelt, shift, mask;
11307 rtx tmp, hi, lo;
11309 /* ??? Don't force V2SFmode into the integer registers. */
11310 if (d->vmode == V2SFmode)
11311 return false;
11313 mask = (d->one_operand_p ? nelt - 1 : 2 * nelt - 1);
11315 shift = d->perm[0];
11316 if (BYTES_BIG_ENDIAN && shift > nelt)
11317 return false;
11319 for (i = 1; i < nelt; ++i)
11320 if (d->perm[i] != ((shift + i) & mask))
11321 return false;
11323 if (d->testing_p)
11324 return true;
11326 hi = shift < nelt ? d->op1 : d->op0;
11327 lo = shift < nelt ? d->op0 : d->op1;
11329 shift %= nelt;
11331 shift *= GET_MODE_UNIT_SIZE (d->vmode) * BITS_PER_UNIT;
11333 /* We've eliminated the shift 0 case via expand_vec_perm_identity. */
11334 gcc_assert (IN_RANGE (shift, 1, 63));
11336 /* Recall that big-endian elements are numbered starting at the top of
11337 the register. Ideally we'd have a shift-left-pair. But since we
11338 don't, convert to a shift the other direction. */
11339 if (BYTES_BIG_ENDIAN)
11340 shift = 64 - shift;
11342 tmp = gen_reg_rtx (DImode);
11343 hi = gen_lowpart (DImode, hi);
11344 lo = gen_lowpart (DImode, lo);
11345 emit_insn (gen_shrp (tmp, hi, lo, GEN_INT (shift)));
11347 emit_move_insn (d->target, gen_lowpart (d->vmode, tmp));
11348 return true;
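/* Worked example (little-endian, illustrative): a V8QI permutation
   selecting elements { 3 4 5 6 7 8 9 10 } of the op1:op0 concatenation
   has shift == 3, so the code above emits

     shrp tmp = hi, lo, 24

   (tmp/hi/lo standing for DImode views of the registers involved):
   the 128-bit pair shifted right by 3 * 8 bits, whose low eight bytes
   are exactly the requested window.  */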
11351 /* Try to instantiate D in a single instruction. */
11353 static bool
11354 expand_vec_perm_1 (struct expand_vec_perm_d *d)
11356 unsigned i, nelt = d->nelt;
11357 unsigned char perm2[MAX_VECT_LEN];
11359 /* Try single-operand selections. */
11360 if (d->one_operand_p)
11362 if (expand_vec_perm_identity (d))
11363 return true;
11364 if (expand_vselect (d->target, d->op0, d->perm, nelt))
11365 return true;
11368 /* Try two operand selections. */
11369 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
11370 return true;
11372 /* Recognize interleave style patterns with reversed operands. */
11373 if (!d->one_operand_p)
11375 for (i = 0; i < nelt; ++i)
11377 unsigned e = d->perm[i];
11378 if (e >= nelt)
11379 e -= nelt;
11380 else
11381 e += nelt;
11382 perm2[i] = e;
11385 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
11386 return true;
11389 if (expand_vec_perm_shrp (d))
11390 return true;
11392 /* ??? Look for deposit-like permutations where most of the result
11393 comes from one vector unchanged and the rest comes from a
11394 sequential hunk of the other vector. */
11396 return false;
11399 /* Pattern match broadcast permutations. */
11401 static bool
11402 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
11404 unsigned i, elt, nelt = d->nelt;
11405 unsigned char perm2[2];
11406 rtx temp;
11407 bool ok;
11409 if (!d->one_operand_p)
11410 return false;
11412 elt = d->perm[0];
11413 for (i = 1; i < nelt; ++i)
11414 if (d->perm[i] != elt)
11415 return false;
11417 switch (d->vmode)
11419 case V2SImode:
11420 case V2SFmode:
11421 /* Implementable by interleave. */
11422 perm2[0] = elt;
11423 perm2[1] = elt + 2;
11424 ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
11425 gcc_assert (ok);
11426 break;
11428 case V8QImode:
11429 /* Implementable by extract + broadcast. */
11430 if (BYTES_BIG_ENDIAN)
11431 elt = 7 - elt;
11432 elt *= BITS_PER_UNIT;
11433 temp = gen_reg_rtx (DImode);
11434 emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
11435 GEN_INT (8), GEN_INT (elt)));
11436 emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
11437 break;
11439 case V4HImode:
11440 /* Should have been matched directly by vec_select. */
11441 default:
11442 gcc_unreachable ();
11445 return true;
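/* For illustration, broadcasting element 2 of a V8QI vector comes out
   roughly as

     extr.u r14 = r15, 16, 8      // extract byte 2
     mux1   r16 = r14, @brcst     // replicate it into all 8 bytes

   (register numbers are placeholders), while V2SI/V2SF broadcasts use
   the interleave path above.  */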
11448 /* A subroutine of ia64_expand_vec_perm_const_1. Try to simplify a
11449 two vector permutation into a single vector permutation by using
11450 an interleave operation to merge the vectors. */
11452 static bool
11453 expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
11455 struct expand_vec_perm_d dremap, dfinal;
11456 unsigned char remap[2 * MAX_VECT_LEN];
11457 unsigned contents, i, nelt, nelt2;
11458 unsigned h0, h1, h2, h3;
11459 rtx_insn *seq;
11460 bool ok;
11462 if (d->one_operand_p)
11463 return false;
11465 nelt = d->nelt;
11466 nelt2 = nelt / 2;
11468 /* Examine from whence the elements come. */
11469 contents = 0;
11470 for (i = 0; i < nelt; ++i)
11471 contents |= 1u << d->perm[i];
11473 memset (remap, 0xff, sizeof (remap));
11474 dremap = *d;
11476 h0 = (1u << nelt2) - 1;
11477 h1 = h0 << nelt2;
11478 h2 = h0 << nelt;
11479 h3 = h0 << (nelt + nelt2);
11481 if ((contents & (h0 | h2)) == contents) /* punpck even halves */
11483 for (i = 0; i < nelt; ++i)
11485 unsigned which = i / 2 + (i & 1 ? nelt : 0);
11486 remap[which] = i;
11487 dremap.perm[i] = which;
11490 else if ((contents & (h1 | h3)) == contents) /* punpck odd halves */
11492 for (i = 0; i < nelt; ++i)
11494 unsigned which = i / 2 + nelt2 + (i & 1 ? nelt : 0);
11495 remap[which] = i;
11496 dremap.perm[i] = which;
11499 else if ((contents & 0x5555) == contents) /* mix even elements */
11501 for (i = 0; i < nelt; ++i)
11503 unsigned which = (i & ~1) + (i & 1 ? nelt : 0);
11504 remap[which] = i;
11505 dremap.perm[i] = which;
11508 else if ((contents & 0xaaaa) == contents) /* mix odd elements */
11510 for (i = 0; i < nelt; ++i)
11512 unsigned which = (i | 1) + (i & 1 ? nelt : 0);
11513 remap[which] = i;
11514 dremap.perm[i] = which;
11517 else if (floor_log2 (contents) - ctz_hwi (contents) < (int)nelt) /* shrp */
11519 unsigned shift = ctz_hwi (contents);
11520 for (i = 0; i < nelt; ++i)
11522 unsigned which = (i + shift) & (2 * nelt - 1);
11523 remap[which] = i;
11524 dremap.perm[i] = which;
11527 else
11528 return false;
11530 /* Use the remapping array set up above to move the elements from their
11531 swizzled locations into their final destinations. */
11532 dfinal = *d;
11533 for (i = 0; i < nelt; ++i)
11535 unsigned e = remap[d->perm[i]];
11536 gcc_assert (e < nelt);
11537 dfinal.perm[i] = e;
11539 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
11540 dfinal.op1 = dfinal.op0;
11541 dfinal.one_operand_p = true;
11542 dremap.target = dfinal.op0;
11544 /* Test if the final remap can be done with a single insn. For V4HImode
11545 this *will* succeed. For V8QImode or V2SImode it may not. */
11546 start_sequence ();
11547 ok = expand_vec_perm_1 (&dfinal);
11548 seq = get_insns ();
11549 end_sequence ();
11550 if (!ok)
11551 return false;
11552 if (d->testing_p)
11553 return true;
11555 ok = expand_vec_perm_1 (&dremap);
11556 gcc_assert (ok);
11558 emit_insn (seq);
11559 return true;
11562 /* A subroutine of ia64_expand_vec_perm_const_1. Emit a full V4HImode
11563 constant permutation via two mux2 and a merge. */
11565 static bool
11566 expand_vec_perm_v4hi_5 (struct expand_vec_perm_d *d)
11568 unsigned char perm2[4];
11569 rtx rmask[4];
11570 unsigned i;
11571 rtx t0, t1, mask, x;
11572 bool ok;
11574 if (d->vmode != V4HImode || d->one_operand_p)
11575 return false;
11576 if (d->testing_p)
11577 return true;
11579 for (i = 0; i < 4; ++i)
11581 perm2[i] = d->perm[i] & 3;
11582 rmask[i] = (d->perm[i] & 4 ? const0_rtx : constm1_rtx);
11584 mask = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmask));
11585 mask = force_reg (V4HImode, mask);
11587 t0 = gen_reg_rtx (V4HImode);
11588 t1 = gen_reg_rtx (V4HImode);
11590 ok = expand_vselect (t0, d->op0, perm2, 4);
11591 gcc_assert (ok);
11592 ok = expand_vselect (t1, d->op1, perm2, 4);
11593 gcc_assert (ok);
11595 x = gen_rtx_AND (V4HImode, mask, t0);
11596 emit_insn (gen_rtx_SET (VOIDmode, t0, x));
11598 x = gen_rtx_NOT (V4HImode, mask);
11599 x = gen_rtx_AND (V4HImode, x, t1);
11600 emit_insn (gen_rtx_SET (VOIDmode, t1, x));
11602 x = gen_rtx_IOR (V4HImode, t0, t1);
11603 emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
11605 return true;
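/* Worked example (illustrative): for the V4HI permutation { 0 5 2 7 }
   the code above uses perm2 = { 0 1 2 3 } and mask = { -1 0 -1 0 },
   so t0 and t1 are just op0 and op1, and the and/and-not/or merge
   produces { op0[0] op1[1] op0[2] op1[3] }, exactly the requested
   selection.  */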
/* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
   With all of the interface bits taken care of, perform the expansion
   in D and return true on success.  */

static bool
ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  if (expand_vec_perm_1 (d))
    return true;
  if (expand_vec_perm_broadcast (d))
    return true;
  if (expand_vec_perm_interleave_2 (d))
    return true;
  if (expand_vec_perm_v4hi_5 (d))
    return true;
  return false;
}
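/* Expand a vec_perm_const-style permutation: OPERANDS[0] is the target
   vector, OPERANDS[1] and OPERANDS[2] the two input vectors, and
   OPERANDS[3] the CONST_VECTOR selector.  Return true if the permutation
   could be expanded.  */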
bool
ia64_expand_vec_perm_const (rtx operands[4])
{
  struct expand_vec_perm_d d;
  unsigned char perm[MAX_VECT_LEN];
  int i, nelt, which;
  rtx sel;

  d.target = operands[0];
  d.op0 = operands[1];
  d.op1 = operands[2];
  sel = operands[3];

  d.vmode = GET_MODE (d.target);
  gcc_assert (VECTOR_MODE_P (d.vmode));
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = false;

  gcc_assert (GET_CODE (sel) == CONST_VECTOR);
  gcc_assert (XVECLEN (sel, 0) == nelt);
  gcc_checking_assert (sizeof (d.perm) == sizeof (perm));

  for (i = which = 0; i < nelt; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      int ei = INTVAL (e) & (2 * nelt - 1);

      which |= (ei < nelt ? 1 : 2);
      d.perm[i] = ei;
      perm[i] = ei;
    }
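  /* WHICH now has bit 0 set if any element was taken from the first
     input and bit 1 set if any was taken from the second; use it to
     canonicalize the permutation onto a single operand where possible.  */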
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      if (!rtx_equal_p (d.op0, d.op1))
        {
          d.one_operand_p = false;
          break;
        }

      /* The elements of PERM do not suggest that only the first operand
         is used, but both operands are identical.  Allow easier matching
         of the permutation by folding the permutation into the single
         input vector.  */
      for (i = 0; i < nelt; ++i)
        if (d.perm[i] >= nelt)
          d.perm[i] -= nelt;
      /* FALLTHRU */

    case 1:
      d.op1 = d.op0;
      d.one_operand_p = true;
      break;

    case 2:
      for (i = 0; i < nelt; ++i)
        d.perm[i] -= nelt;
      d.op0 = d.op1;
      d.one_operand_p = true;
      break;
    }

  if (ia64_expand_vec_perm_const_1 (&d))
    return true;

  /* If the mask says both arguments are needed, but they are the same,
     the above tried to expand with one_operand_p true.  If that didn't
     work, retry with one_operand_p false, as that's what we used in _ok.  */
  if (which == 3 && d.one_operand_p)
    {
      memcpy (d.perm, perm, sizeof (perm));
      d.one_operand_p = false;
      return ia64_expand_vec_perm_const_1 (&d);
    }

  return false;
}
/* Implement targetm.vectorize.vec_perm_const_ok.  */

static bool
ia64_vectorize_vec_perm_const_ok (machine_mode vmode,
                                  const unsigned char *sel)
{
  struct expand_vec_perm_d d;
  unsigned int i, nelt, which;
  bool ret;

  d.vmode = vmode;
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;

  /* Extract the values from the vector CST into the permutation
     array in D.  */
  memcpy (d.perm, sel, nelt);
  for (i = which = 0; i < nelt; ++i)
    {
      unsigned char e = d.perm[i];
      gcc_assert (e < 2 * nelt);
      which |= (e < nelt ? 1 : 2);
    }

  /* For all elements from second vector, fold the elements to first.  */
  if (which == 2)
    for (i = 0; i < nelt; ++i)
      d.perm[i] -= nelt;

  /* Check whether the mask can be applied to the vector type.  */
  d.one_operand_p = (which != 3);

  /* Otherwise we have to go through the motions and see if we can
     figure out how to generate the requested permutation.  */
  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!d.one_operand_p)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  start_sequence ();
  ret = ia64_expand_vec_perm_const_1 (&d);
  end_sequence ();

  return ret;
}
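/* Expand a V2SFmode vector-set operation: OPERANDS[0] is the V2SF vector
   to modify, OPERANDS[1] the new SFmode element value, and OPERANDS[2]
   the element index (0 or 1).  The new value is packed into a fresh
   register with fpack and merged into place via a constant permutation.  */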
void
ia64_expand_vec_setv2sf (rtx operands[3])
{
  struct expand_vec_perm_d d;
  unsigned int which;
  bool ok;

  d.target = operands[0];
  d.op0 = operands[0];
  d.op1 = gen_reg_rtx (V2SFmode);
  d.vmode = V2SFmode;
  d.nelt = 2;
  d.one_operand_p = false;
  d.testing_p = false;

  which = INTVAL (operands[2]);
  gcc_assert (which <= 1);
  d.perm[0] = 1 - which;
  d.perm[1] = which + 2;

  emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
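/* Fill TARGET with the even-numbered (ODD == 0) or odd-numbered (ODD == 1)
   elements of the concatenation of OP0 and OP1, i.e. a constant
   permutation selecting elements 2 * i + ODD.  */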
void
ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
{
  struct expand_vec_perm_d d;
  machine_mode vmode = GET_MODE (target);
  unsigned int i, nelt = GET_MODE_NUNITS (vmode);
  bool ok;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = vmode;
  d.nelt = nelt;
  d.one_operand_p = false;
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}

#include "gt-ia64.h"