gcc/config/ia64/ia64.c
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "toplev.h"
45 #include "sched-int.h"
46 #include "timevar.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "hashtab.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
53 #include "tree-gimple.h"
54 #include "intl.h"
55 #include "df.h"
56 #include "debug.h"
57 #include "params.h"
58 #include "dbgcnt.h"
59 #include "tm-constrs.h"
61 /* This is used for communication between ASM_OUTPUT_LABEL and
62 ASM_OUTPUT_LABELREF. */
63 int ia64_asm_output_label = 0;
65 /* Define the information needed to generate branch and scc insns. This is
66 stored from the compare operation. */
67 struct rtx_def * ia64_compare_op0;
68 struct rtx_def * ia64_compare_op1;
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
106 /* Which cpu are we scheduling for. */
107 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
109 /* Determines whether we run our final scheduling pass or not. We always
110 avoid the normal second scheduling pass. */
111 static int ia64_flag_schedule_insns2;
113 /* Determines whether we run variable tracking in machine dependent
114 reorganization. */
115 static int ia64_flag_var_tracking;
117 /* Variables which are this size or smaller are put in the sdata/sbss
118 sections. */
120 unsigned int ia64_section_threshold;
122 /* The following variable is used by the DFA insn scheduler. The value is
123 TRUE if we do insn bundling instead of insn scheduling. */
124 int bundling_p = 0;
126 enum ia64_frame_regs
128 reg_fp,
129 reg_save_b0,
130 reg_save_pr,
131 reg_save_ar_pfs,
132 reg_save_ar_unat,
133 reg_save_ar_lc,
134 reg_save_gp,
135 number_of_ia64_frame_regs
138 /* Structure to be filled in by ia64_compute_frame_size with register
139 save masks and offsets for the current function. */
141 struct ia64_frame_info
143 HOST_WIDE_INT total_size; /* size of the stack frame, not including
144 the caller's scratch area. */
145 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
146 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
147 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
148 HARD_REG_SET mask; /* mask of saved registers. */
149 unsigned int gr_used_mask; /* mask of registers in use as gr spill
150 registers or long-term scratches. */
151 int n_spilled; /* number of spilled registers. */
152 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
153 int n_input_regs; /* number of input registers used. */
154 int n_local_regs; /* number of local registers used. */
155 int n_output_regs; /* number of output registers used. */
156 int n_rotate_regs; /* number of rotating registers used. */
158 char need_regstk; /* true if a .regstk directive needed. */
159 char initialized; /* true if the data is finalized. */
162 /* Current frame information calculated by ia64_compute_frame_size. */
163 static struct ia64_frame_info current_frame_info;
164 /* The actual registers that are emitted. */
165 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
173 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
174 static void ia64_h_i_d_extended (void);
175 static int ia64_mode_to_int (enum machine_mode);
176 static void ia64_set_sched_flags (spec_info_t);
177 static int ia64_speculate_insn (rtx, ds_t, rtx *);
178 static rtx ia64_gen_spec_insn (rtx, ds_t, int, bool, bool);
179 static bool ia64_needs_block_p (const_rtx);
180 static rtx ia64_gen_check (rtx, rtx, bool);
181 static int ia64_spec_check_p (rtx);
182 static int ia64_spec_check_src_p (rtx);
183 static rtx gen_tls_get_addr (void);
184 static rtx gen_thread_pointer (void);
185 static int find_gr_spill (enum ia64_frame_regs, int);
186 static int next_scratch_gr_reg (void);
187 static void mark_reg_gr_used_mask (rtx, void *);
188 static void ia64_compute_frame_size (HOST_WIDE_INT);
189 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
190 static void finish_spill_pointers (void);
191 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
192 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
193 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
194 static rtx gen_movdi_x (rtx, rtx, rtx);
195 static rtx gen_fr_spill_x (rtx, rtx, rtx);
196 static rtx gen_fr_restore_x (rtx, rtx, rtx);
198 static enum machine_mode hfa_element_mode (const_tree, bool);
199 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
200 tree, int *, int);
201 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
202 tree, bool);
203 static bool ia64_function_ok_for_sibcall (tree, tree);
204 static bool ia64_return_in_memory (const_tree, const_tree);
205 static bool ia64_rtx_costs (rtx, int, int, int *);
206 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
207 static void fix_range (const char *);
208 static bool ia64_handle_option (size_t, const char *, int);
209 static struct machine_function * ia64_init_machine_status (void);
210 static void emit_insn_group_barriers (FILE *);
211 static void emit_all_insn_group_barriers (FILE *);
212 static void final_emit_insn_group_barriers (FILE *);
213 static void emit_predicate_relation_info (void);
214 static void ia64_reorg (void);
215 static bool ia64_in_small_data_p (const_tree);
216 static void process_epilogue (FILE *, rtx, bool, bool);
217 static int process_set (FILE *, rtx, rtx, bool, bool);
219 static bool ia64_assemble_integer (rtx, unsigned int, int);
220 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
221 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
222 static void ia64_output_function_end_prologue (FILE *);
224 static int ia64_issue_rate (void);
225 static int ia64_adjust_cost (rtx, rtx, rtx, int);
226 static void ia64_sched_init (FILE *, int, int);
227 static void ia64_sched_init_global (FILE *, int, int);
228 static void ia64_sched_finish_global (FILE *, int);
229 static void ia64_sched_finish (FILE *, int);
230 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
231 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
232 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
233 static int ia64_variable_issue (FILE *, int, rtx, int);
235 static struct bundle_state *get_free_bundle_state (void);
236 static void free_bundle_state (struct bundle_state *);
237 static void initiate_bundle_states (void);
238 static void finish_bundle_states (void);
239 static unsigned bundle_state_hash (const void *);
240 static int bundle_state_eq_p (const void *, const void *);
241 static int insert_bundle_state (struct bundle_state *);
242 static void initiate_bundle_state_table (void);
243 static void finish_bundle_state_table (void);
244 static int try_issue_nops (struct bundle_state *, int);
245 static int try_issue_insn (struct bundle_state *, rtx);
246 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
247 static int get_max_pos (state_t);
248 static int get_template (state_t, int);
250 static rtx get_next_important_insn (rtx, rtx);
251 static void bundling (FILE *, int, rtx, rtx);
253 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
254 HOST_WIDE_INT, tree);
255 static void ia64_file_start (void);
256 static void ia64_globalize_decl_name (FILE *, tree);
258 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
259 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
260 static section *ia64_select_rtx_section (enum machine_mode, rtx,
261 unsigned HOST_WIDE_INT);
262 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
263 ATTRIBUTE_UNUSED;
264 static unsigned int ia64_section_type_flags (tree, const char *, int);
265 static void ia64_init_libfuncs (void)
266 ATTRIBUTE_UNUSED;
267 static void ia64_hpux_init_libfuncs (void)
268 ATTRIBUTE_UNUSED;
269 static void ia64_sysv4_init_libfuncs (void)
270 ATTRIBUTE_UNUSED;
271 static void ia64_vms_init_libfuncs (void)
272 ATTRIBUTE_UNUSED;
274 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
275 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
276 static void ia64_encode_section_info (tree, rtx, int);
277 static rtx ia64_struct_value_rtx (tree, int);
278 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
279 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
280 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
281 static bool ia64_cannot_force_const_mem (rtx);
282 static const char *ia64_mangle_type (const_tree);
283 static const char *ia64_invalid_conversion (const_tree, const_tree);
284 static const char *ia64_invalid_unary_op (int, const_tree);
285 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
286 static enum machine_mode ia64_c_mode_for_suffix (char);
288 /* Table of valid machine attributes. */
289 static const struct attribute_spec ia64_attribute_table[] =
291 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
292 { "syscall_linkage", 0, 0, false, true, true, NULL },
293 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
294 { "version_id", 1, 1, true, false, false,
295 ia64_handle_version_id_attribute },
296 { NULL, 0, 0, false, false, false, NULL }
299 /* Initialize the GCC target structure. */
300 #undef TARGET_ATTRIBUTE_TABLE
301 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
303 #undef TARGET_INIT_BUILTINS
304 #define TARGET_INIT_BUILTINS ia64_init_builtins
306 #undef TARGET_EXPAND_BUILTIN
307 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
309 #undef TARGET_ASM_BYTE_OP
310 #define TARGET_ASM_BYTE_OP "\tdata1\t"
311 #undef TARGET_ASM_ALIGNED_HI_OP
312 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
313 #undef TARGET_ASM_ALIGNED_SI_OP
314 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
315 #undef TARGET_ASM_ALIGNED_DI_OP
316 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
317 #undef TARGET_ASM_UNALIGNED_HI_OP
318 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
319 #undef TARGET_ASM_UNALIGNED_SI_OP
320 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
321 #undef TARGET_ASM_UNALIGNED_DI_OP
322 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
323 #undef TARGET_ASM_INTEGER
324 #define TARGET_ASM_INTEGER ia64_assemble_integer
326 #undef TARGET_ASM_FUNCTION_PROLOGUE
327 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
328 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
329 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
330 #undef TARGET_ASM_FUNCTION_EPILOGUE
331 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
333 #undef TARGET_IN_SMALL_DATA_P
334 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
336 #undef TARGET_SCHED_ADJUST_COST
337 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
338 #undef TARGET_SCHED_ISSUE_RATE
339 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
340 #undef TARGET_SCHED_VARIABLE_ISSUE
341 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
342 #undef TARGET_SCHED_INIT
343 #define TARGET_SCHED_INIT ia64_sched_init
344 #undef TARGET_SCHED_FINISH
345 #define TARGET_SCHED_FINISH ia64_sched_finish
346 #undef TARGET_SCHED_INIT_GLOBAL
347 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
348 #undef TARGET_SCHED_FINISH_GLOBAL
349 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
350 #undef TARGET_SCHED_REORDER
351 #define TARGET_SCHED_REORDER ia64_sched_reorder
352 #undef TARGET_SCHED_REORDER2
353 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
355 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
356 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
358 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
359 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
361 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
362 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
363 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
364 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
366 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
367 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
368 ia64_first_cycle_multipass_dfa_lookahead_guard
370 #undef TARGET_SCHED_DFA_NEW_CYCLE
371 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
373 #undef TARGET_SCHED_H_I_D_EXTENDED
374 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
376 #undef TARGET_SCHED_SET_SCHED_FLAGS
377 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
379 #undef TARGET_SCHED_SPECULATE_INSN
380 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
382 #undef TARGET_SCHED_NEEDS_BLOCK_P
383 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
385 #undef TARGET_SCHED_GEN_CHECK
386 #define TARGET_SCHED_GEN_CHECK ia64_gen_check
388 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
389 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
390 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
392 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
393 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
394 #undef TARGET_ARG_PARTIAL_BYTES
395 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
397 #undef TARGET_ASM_OUTPUT_MI_THUNK
398 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
399 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
400 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
402 #undef TARGET_ASM_FILE_START
403 #define TARGET_ASM_FILE_START ia64_file_start
405 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
406 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
408 #undef TARGET_RTX_COSTS
409 #define TARGET_RTX_COSTS ia64_rtx_costs
410 #undef TARGET_ADDRESS_COST
411 #define TARGET_ADDRESS_COST hook_int_rtx_0
413 #undef TARGET_UNSPEC_MAY_TRAP_P
414 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
416 #undef TARGET_MACHINE_DEPENDENT_REORG
417 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
419 #undef TARGET_ENCODE_SECTION_INFO
420 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
422 #undef TARGET_SECTION_TYPE_FLAGS
423 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
425 #ifdef HAVE_AS_TLS
426 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
427 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
428 #endif
430 /* ??? ABI doesn't allow us to define this. */
431 #if 0
432 #undef TARGET_PROMOTE_FUNCTION_ARGS
433 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
434 #endif
436 /* ??? ABI doesn't allow us to define this. */
437 #if 0
438 #undef TARGET_PROMOTE_FUNCTION_RETURN
439 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
440 #endif
442 /* ??? Investigate. */
443 #if 0
444 #undef TARGET_PROMOTE_PROTOTYPES
445 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
446 #endif
448 #undef TARGET_STRUCT_VALUE_RTX
449 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
450 #undef TARGET_RETURN_IN_MEMORY
451 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
452 #undef TARGET_SETUP_INCOMING_VARARGS
453 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
454 #undef TARGET_STRICT_ARGUMENT_NAMING
455 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
456 #undef TARGET_MUST_PASS_IN_STACK
457 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
459 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
460 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
462 #undef TARGET_UNWIND_EMIT
463 #define TARGET_UNWIND_EMIT process_for_unwind_directive
465 #undef TARGET_SCALAR_MODE_SUPPORTED_P
466 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
467 #undef TARGET_VECTOR_MODE_SUPPORTED_P
468 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
470 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
471 in an order different from the specified program order. */
472 #undef TARGET_RELAXED_ORDERING
473 #define TARGET_RELAXED_ORDERING true
475 #undef TARGET_DEFAULT_TARGET_FLAGS
476 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
477 #undef TARGET_HANDLE_OPTION
478 #define TARGET_HANDLE_OPTION ia64_handle_option
480 #undef TARGET_CANNOT_FORCE_CONST_MEM
481 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
483 #undef TARGET_MANGLE_TYPE
484 #define TARGET_MANGLE_TYPE ia64_mangle_type
486 #undef TARGET_INVALID_CONVERSION
487 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
488 #undef TARGET_INVALID_UNARY_OP
489 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
490 #undef TARGET_INVALID_BINARY_OP
491 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
493 #undef TARGET_C_MODE_FOR_SUFFIX
494 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
496 struct gcc_target targetm = TARGET_INITIALIZER;
498 typedef enum
500 ADDR_AREA_NORMAL, /* normal address area */
501 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
503 ia64_addr_area;
505 static GTY(()) tree small_ident1;
506 static GTY(()) tree small_ident2;
508 static void
509 init_idents (void)
511 if (small_ident1 == 0)
513 small_ident1 = get_identifier ("small");
514 small_ident2 = get_identifier ("__small__");
518 /* Retrieve the address area that has been chosen for the given decl. */
520 static ia64_addr_area
521 ia64_get_addr_area (tree decl)
523 tree model_attr;
525 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
526 if (model_attr)
528 tree id;
530 init_idents ();
531 id = TREE_VALUE (TREE_VALUE (model_attr));
532 if (id == small_ident1 || id == small_ident2)
533 return ADDR_AREA_SMALL;
535 return ADDR_AREA_NORMAL;
538 static tree
539 ia64_handle_model_attribute (tree *node, tree name, tree args,
540 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
542 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
543 ia64_addr_area area;
544 tree arg, decl = *node;
546 init_idents ();
547 arg = TREE_VALUE (args);
548 if (arg == small_ident1 || arg == small_ident2)
550 addr_area = ADDR_AREA_SMALL;
552 else
554 warning (OPT_Wattributes, "invalid argument of %qs attribute",
555 IDENTIFIER_POINTER (name));
556 *no_add_attrs = true;
559 switch (TREE_CODE (decl))
561 case VAR_DECL:
562 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
563 == FUNCTION_DECL)
564 && !TREE_STATIC (decl))
566 error ("%Jan address area attribute cannot be specified for "
567 "local variables", decl);
568 *no_add_attrs = true;
570 area = ia64_get_addr_area (decl);
571 if (area != ADDR_AREA_NORMAL && addr_area != area)
573 error ("address area of %q+D conflicts with previous "
574 "declaration", decl);
575 *no_add_attrs = true;
577 break;
579 case FUNCTION_DECL:
580 error ("%Jaddress area attribute cannot be specified for functions",
581 decl);
582 *no_add_attrs = true;
583 break;
585 default:
586 warning (OPT_Wattributes, "%qs attribute ignored",
587 IDENTIFIER_POINTER (name));
588 *no_add_attrs = true;
589 break;
592 return NULL_TREE;
595 static void
596 ia64_encode_addr_area (tree decl, rtx symbol)
598 int flags;
600 flags = SYMBOL_REF_FLAGS (symbol);
601 switch (ia64_get_addr_area (decl))
603 case ADDR_AREA_NORMAL: break;
604 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
605 default: gcc_unreachable ();
607 SYMBOL_REF_FLAGS (symbol) = flags;
610 static void
611 ia64_encode_section_info (tree decl, rtx rtl, int first)
613 default_encode_section_info (decl, rtl, first);
615 /* Careful not to prod global register variables. */
616 if (TREE_CODE (decl) == VAR_DECL
617 && GET_CODE (DECL_RTL (decl)) == MEM
618 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
619 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
620 ia64_encode_addr_area (decl, XEXP (rtl, 0));
623 /* Return 1 if the operands of a move are ok. */
626 ia64_move_ok (rtx dst, rtx src)
628 /* If we're under init_recog_no_volatile, we'll not be able to use
629 memory_operand. So check the code directly and don't worry about
630 the validity of the underlying address, which should have been
631 checked elsewhere anyway. */
632 if (GET_CODE (dst) != MEM)
633 return 1;
634 if (GET_CODE (src) == MEM)
635 return 0;
636 if (register_operand (src, VOIDmode))
637 return 1;
 639 /* Otherwise, this must be a constant: either 0, 0.0, or 1.0. */
640 if (INTEGRAL_MODE_P (GET_MODE (dst)))
641 return src == const0_rtx;
642 else
643 return satisfies_constraint_G (src);
646 /* Return 1 if the operands are ok for a floating point load pair. */
649 ia64_load_pair_ok (rtx dst, rtx src)
651 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
652 return 0;
653 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
654 return 0;
655 switch (GET_CODE (XEXP (src, 0)))
657 case REG:
658 case POST_INC:
659 break;
660 case POST_DEC:
661 return 0;
662 case POST_MODIFY:
664 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
666 if (GET_CODE (adjust) != CONST_INT
667 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
668 return 0;
670 break;
671 default:
672 abort ();
674 return 1;
678 addp4_optimize_ok (rtx op1, rtx op2)
680 return (basereg_operand (op1, GET_MODE(op1)) !=
681 basereg_operand (op2, GET_MODE(op2)));
684 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
685 Return the length of the field, or <= 0 on failure. */
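/* For example: ROP == 0xff00 with RSHIFT == 8 shifts down to OP == 0xff,
   and exact_log2 (0xff + 1) gives a field length of 8.  A non-contiguous
   mask such as 0xf0f leaves OP + 1 a non-power-of-two, so exact_log2
   returns -1, signalling failure.  */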
688 ia64_depz_field_mask (rtx rop, rtx rshift)
690 unsigned HOST_WIDE_INT op = INTVAL (rop);
691 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
693 /* Get rid of the zero bits we're shifting in. */
694 op >>= shift;
696 /* We must now have a solid block of 1's at bit 0. */
697 return exact_log2 (op + 1);
700 /* Return the TLS model to use for ADDR. */
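/* A result of zero means ADDR is not thread-local; the callers only test
   the return value against zero.  */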
702 static enum tls_model
703 tls_symbolic_operand_type (rtx addr)
705 enum tls_model tls_kind = 0;
707 if (GET_CODE (addr) == CONST)
709 if (GET_CODE (XEXP (addr, 0)) == PLUS
710 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
711 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
713 else if (GET_CODE (addr) == SYMBOL_REF)
714 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
716 return tls_kind;
719 /* Return true if X is a constant that is valid for some immediate
720 field in an instruction. */
722 bool
723 ia64_legitimate_constant_p (rtx x)
725 switch (GET_CODE (x))
727 case CONST_INT:
728 case LABEL_REF:
729 return true;
731 case CONST_DOUBLE:
732 if (GET_MODE (x) == VOIDmode)
733 return true;
734 return satisfies_constraint_G (x);
736 case CONST:
737 case SYMBOL_REF:
738 /* ??? Short term workaround for PR 28490. We must make the code here
739 match the code in ia64_expand_move and move_operand, even though they
740 are both technically wrong. */
741 if (tls_symbolic_operand_type (x) == 0)
743 HOST_WIDE_INT addend = 0;
744 rtx op = x;
746 if (GET_CODE (op) == CONST
747 && GET_CODE (XEXP (op, 0)) == PLUS
748 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
750 addend = INTVAL (XEXP (XEXP (op, 0), 1));
751 op = XEXP (XEXP (op, 0), 0);
754 if (any_offset_symbol_operand (op, GET_MODE (op))
755 || function_operand (op, GET_MODE (op)))
756 return true;
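/* Aligned-offset symbols only tolerate addends whose low 14 bits are
   clear, i.e. multiples of 0x4000; any other addend has to be added
   separately, as done in ia64_expand_move below.  */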
757 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
758 return (addend & 0x3fff) == 0;
759 return false;
761 return false;
763 case CONST_VECTOR:
765 enum machine_mode mode = GET_MODE (x);
767 if (mode == V2SFmode)
768 return satisfies_constraint_Y (x);
770 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
771 && GET_MODE_SIZE (mode) <= 8);
774 default:
775 return false;
779 /* Don't allow TLS addresses to get spilled to memory. */
781 static bool
782 ia64_cannot_force_const_mem (rtx x)
784 return tls_symbolic_operand_type (x) != 0;
787 /* Expand a symbolic constant load. */
789 bool
790 ia64_expand_load_address (rtx dest, rtx src)
792 gcc_assert (GET_CODE (dest) == REG);
794 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
795 having to pointer-extend the value afterward. Other forms of address
796 computation below are also more natural to compute as 64-bit quantities.
797 If we've been given an SImode destination register, change it. */
798 if (GET_MODE (dest) != Pmode)
799 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
800 byte_lowpart_offset (Pmode, GET_MODE (dest)));
802 if (TARGET_NO_PIC)
803 return false;
804 if (small_addr_symbolic_operand (src, VOIDmode))
805 return false;
807 if (TARGET_AUTO_PIC)
808 emit_insn (gen_load_gprel64 (dest, src));
809 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
810 emit_insn (gen_load_fptr (dest, src));
811 else if (sdata_symbolic_operand (src, VOIDmode))
812 emit_insn (gen_load_gprel (dest, src));
813 else
815 HOST_WIDE_INT addend = 0;
816 rtx tmp;
818 /* We did split constant offsets in ia64_expand_move, and we did try
819 to keep them split in move_operand, but we also allowed reload to
820 rematerialize arbitrary constants rather than spill the value to
821 the stack and reload it. So we have to be prepared here to split
822 them apart again. */
823 if (GET_CODE (src) == CONST)
825 HOST_WIDE_INT hi, lo;
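/* Sign-extend the low 14 bits of the offset into LO and keep the
   0x4000-aligned remainder in HI; e.g. an offset of 0x6000 splits
   into HI == 0x8000 and LO == -0x2000.  */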
827 hi = INTVAL (XEXP (XEXP (src, 0), 1));
828 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
829 hi = hi - lo;
831 if (lo != 0)
833 addend = lo;
834 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
838 tmp = gen_rtx_HIGH (Pmode, src);
839 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
840 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
842 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
843 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
845 if (addend)
847 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
848 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
852 return true;
855 static GTY(()) rtx gen_tls_tga;
856 static rtx
857 gen_tls_get_addr (void)
859 if (!gen_tls_tga)
860 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
861 return gen_tls_tga;
864 static GTY(()) rtx thread_pointer_rtx;
865 static rtx
866 gen_thread_pointer (void)
868 if (!thread_pointer_rtx)
869 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
870 return thread_pointer_rtx;
873 static rtx
874 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
875 rtx orig_op1, HOST_WIDE_INT addend)
877 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
878 rtx orig_op0 = op0;
879 HOST_WIDE_INT addend_lo, addend_hi;
881 switch (tls_kind)
883 case TLS_MODEL_GLOBAL_DYNAMIC:
884 start_sequence ();
886 tga_op1 = gen_reg_rtx (Pmode);
887 emit_insn (gen_load_dtpmod (tga_op1, op1));
889 tga_op2 = gen_reg_rtx (Pmode);
890 emit_insn (gen_load_dtprel (tga_op2, op1));
892 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
893 LCT_CONST, Pmode, 2, tga_op1,
894 Pmode, tga_op2, Pmode);
896 insns = get_insns ();
897 end_sequence ();
899 if (GET_MODE (op0) != Pmode)
900 op0 = tga_ret;
901 emit_libcall_block (insns, op0, tga_ret, op1);
902 break;
904 case TLS_MODEL_LOCAL_DYNAMIC:
 905 /* ??? This isn't the completely proper way to do local-dynamic.
906 If the call to __tls_get_addr is used only by a single symbol,
907 then we should (somehow) move the dtprel to the second arg
908 to avoid the extra add. */
909 start_sequence ();
911 tga_op1 = gen_reg_rtx (Pmode);
912 emit_insn (gen_load_dtpmod (tga_op1, op1));
914 tga_op2 = const0_rtx;
916 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
917 LCT_CONST, Pmode, 2, tga_op1,
918 Pmode, tga_op2, Pmode);
920 insns = get_insns ();
921 end_sequence ();
923 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
924 UNSPEC_LD_BASE);
925 tmp = gen_reg_rtx (Pmode);
926 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
928 if (!register_operand (op0, Pmode))
929 op0 = gen_reg_rtx (Pmode);
930 if (TARGET_TLS64)
932 emit_insn (gen_load_dtprel (op0, op1));
933 emit_insn (gen_adddi3 (op0, tmp, op0));
935 else
936 emit_insn (gen_add_dtprel (op0, op1, tmp));
937 break;
939 case TLS_MODEL_INITIAL_EXEC:
940 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
941 addend_hi = addend - addend_lo;
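/* Same 14-bit low/high split of the addend as in ia64_expand_load_address:
   the aligned high part is folded into the symbolic operand here, and the
   sign-extended low part is added at the end of this function.  */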
943 op1 = plus_constant (op1, addend_hi);
944 addend = addend_lo;
946 tmp = gen_reg_rtx (Pmode);
947 emit_insn (gen_load_tprel (tmp, op1));
949 if (!register_operand (op0, Pmode))
950 op0 = gen_reg_rtx (Pmode);
951 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
952 break;
954 case TLS_MODEL_LOCAL_EXEC:
955 if (!register_operand (op0, Pmode))
956 op0 = gen_reg_rtx (Pmode);
958 op1 = orig_op1;
959 addend = 0;
960 if (TARGET_TLS64)
962 emit_insn (gen_load_tprel (op0, op1));
963 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
965 else
966 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
967 break;
969 default:
970 gcc_unreachable ();
973 if (addend)
974 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
975 orig_op0, 1, OPTAB_DIRECT);
976 if (orig_op0 == op0)
977 return NULL_RTX;
978 if (GET_MODE (orig_op0) == Pmode)
979 return op0;
980 return gen_lowpart (GET_MODE (orig_op0), op0);
984 ia64_expand_move (rtx op0, rtx op1)
986 enum machine_mode mode = GET_MODE (op0);
988 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
989 op1 = force_reg (mode, op1);
991 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
993 HOST_WIDE_INT addend = 0;
994 enum tls_model tls_kind;
995 rtx sym = op1;
997 if (GET_CODE (op1) == CONST
998 && GET_CODE (XEXP (op1, 0)) == PLUS
999 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1001 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1002 sym = XEXP (XEXP (op1, 0), 0);
1005 tls_kind = tls_symbolic_operand_type (sym);
1006 if (tls_kind)
1007 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1009 if (any_offset_symbol_operand (sym, mode))
1010 addend = 0;
1011 else if (aligned_offset_symbol_operand (sym, mode))
1013 HOST_WIDE_INT addend_lo, addend_hi;
1015 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1016 addend_hi = addend - addend_lo;
1018 if (addend_lo != 0)
1020 op1 = plus_constant (sym, addend_hi);
1021 addend = addend_lo;
1023 else
1024 addend = 0;
1026 else
1027 op1 = sym;
1029 if (reload_completed)
1031 /* We really should have taken care of this offset earlier. */
1032 gcc_assert (addend == 0);
1033 if (ia64_expand_load_address (op0, op1))
1034 return NULL_RTX;
1037 if (addend)
1039 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1041 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1043 op1 = expand_simple_binop (mode, PLUS, subtarget,
1044 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1045 if (op0 == op1)
1046 return NULL_RTX;
1050 return op1;
1053 /* Split a move from OP1 to OP0 conditional on COND. */
1055 void
1056 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1058 rtx insn, first = get_last_insn ();
1060 emit_move_insn (op0, op1);
1062 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1063 if (INSN_P (insn))
1064 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1065 PATTERN (insn));
1068 /* Split a post-reload TImode or TFmode reference into two DImode
1069 components. This is made extra difficult by the fact that we do
1070 not get any scratch registers to work with, because reload cannot
1071 be prevented from giving us a scratch that overlaps the register
1072 pair involved. So instead, when addressing memory, we tweak the
1073 pointer register up and back down with POST_INCs. Or up and not
1074 back down when we can get away with it.
1076 REVERSED is true when the loads must be done in reversed order
1077 (high word first) for correctness. DEAD is true when the pointer
1078 dies with the second insn we generate and therefore the second
1079 address must not carry a postmodify.
1081 May return an insn which is to be emitted after the moves. */
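/* For instance (illustrative): a TImode load from (mem (reg R)) becomes a
   DImode load through a POST_INC of R followed by a DImode load through a
   POST_DEC of R, so R is left unchanged unless DEAD says the pointer need
   not be restored.  */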
1083 static rtx
1084 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1086 rtx fixup = 0;
1088 switch (GET_CODE (in))
1090 case REG:
1091 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1092 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1093 break;
1095 case CONST_INT:
1096 case CONST_DOUBLE:
1097 /* Cannot occur reversed. */
1098 gcc_assert (!reversed);
1100 if (GET_MODE (in) != TFmode)
1101 split_double (in, &out[0], &out[1]);
1102 else
1103 /* split_double does not understand how to split a TFmode
1104 quantity into a pair of DImode constants. */
1106 REAL_VALUE_TYPE r;
1107 unsigned HOST_WIDE_INT p[2];
1108 long l[4]; /* TFmode is 128 bits */
1110 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1111 real_to_target (l, &r, TFmode);
1113 if (FLOAT_WORDS_BIG_ENDIAN)
1115 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1116 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1118 else
1120 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1121 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1123 out[0] = GEN_INT (p[0]);
1124 out[1] = GEN_INT (p[1]);
1126 break;
1128 case MEM:
1130 rtx base = XEXP (in, 0);
1131 rtx offset;
1133 switch (GET_CODE (base))
1135 case REG:
1136 if (!reversed)
1138 out[0] = adjust_automodify_address
1139 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1140 out[1] = adjust_automodify_address
1141 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1143 else
1145 /* Reversal requires a pre-increment, which can only
1146 be done as a separate insn. */
1147 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1148 out[0] = adjust_automodify_address
1149 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1150 out[1] = adjust_address (in, DImode, 0);
1152 break;
1154 case POST_INC:
1155 gcc_assert (!reversed && !dead);
1157 /* Just do the increment in two steps. */
1158 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1159 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1160 break;
1162 case POST_DEC:
1163 gcc_assert (!reversed && !dead);
1165 /* Add 8, subtract 24. */
1166 base = XEXP (base, 0);
1167 out[0] = adjust_automodify_address
1168 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1169 out[1] = adjust_automodify_address
1170 (in, DImode,
1171 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1173 break;
1175 case POST_MODIFY:
1176 gcc_assert (!reversed && !dead);
1178 /* Extract and adjust the modification. This case is
1179 trickier than the others, because we might have an
1180 index register, or we might have a combined offset that
1181 doesn't fit a signed 9-bit displacement field. We can
1182 assume the incoming expression is already legitimate. */
1183 offset = XEXP (base, 1);
1184 base = XEXP (base, 0);
1186 out[0] = adjust_automodify_address
1187 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1189 if (GET_CODE (XEXP (offset, 1)) == REG)
1191 /* Can't adjust the postmodify to match. Emit the
1192 original, then a separate addition insn. */
1193 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1194 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1196 else
1198 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1199 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1201 /* Again the postmodify cannot be made to match,
1202 but in this case it's more efficient to get rid
1203 of the postmodify entirely and fix up with an
1204 add insn. */
1205 out[1] = adjust_automodify_address (in, DImode, base, 8);
1206 fixup = gen_adddi3
1207 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1209 else
1211 /* Combined offset still fits in the displacement field.
1212 (We cannot overflow it at the high end.) */
1213 out[1] = adjust_automodify_address
1214 (in, DImode, gen_rtx_POST_MODIFY
1215 (Pmode, base, gen_rtx_PLUS
1216 (Pmode, base,
1217 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1221 break;
1223 default:
1224 gcc_unreachable ();
1226 break;
1229 default:
1230 gcc_unreachable ();
1233 return fixup;
1236 /* Split a TImode or TFmode move instruction after reload.
1237 This is used by *movtf_internal and *movti_internal. */
1238 void
1239 ia64_split_tmode_move (rtx operands[])
1241 rtx in[2], out[2], insn;
1242 rtx fixup[2];
1243 bool dead = false;
1244 bool reversed = false;
1246 /* It is possible for reload to decide to overwrite a pointer with
1247 the value it points to. In that case we have to do the loads in
1248 the appropriate order so that the pointer is not destroyed too
1249 early. Also we must not generate a postmodify for that second
1250 load, or rws_access_regno will die. */
1251 if (GET_CODE (operands[1]) == MEM
1252 && reg_overlap_mentioned_p (operands[0], operands[1]))
1254 rtx base = XEXP (operands[1], 0);
1255 while (GET_CODE (base) != REG)
1256 base = XEXP (base, 0);
1258 if (REGNO (base) == REGNO (operands[0]))
1259 reversed = true;
1260 dead = true;
1262 /* Another reason to do the moves in reversed order is if the first
1263 element of the target register pair is also the second element of
1264 the source register pair. */
1265 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1266 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1267 reversed = true;
1269 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1270 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
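/* If either memory operand ended up with an autoincrement address, record
   a REG_INC note so later passes know the base register is modified by
   the move.  */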
1272 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1273 if (GET_CODE (EXP) == MEM \
1274 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1275 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1276 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1277 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1278 XEXP (XEXP (EXP, 0), 0), \
1279 REG_NOTES (INSN))
1281 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1282 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1283 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1285 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1286 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1287 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1289 if (fixup[0])
1290 emit_insn (fixup[0]);
1291 if (fixup[1])
1292 emit_insn (fixup[1]);
1294 #undef MAYBE_ADD_REG_INC_NOTE
1297 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1298 through memory plus an extra GR scratch register. Except that you can
1299 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1300 SECONDARY_RELOAD_CLASS, but not both.
1302 We got into problems in the first place by allowing a construct like
1303 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1304 This solution attempts to prevent this situation from occurring. When
1305 we see something like the above, we spill the inner register to memory. */
1307 static rtx
1308 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1310 if (GET_CODE (in) == SUBREG
1311 && GET_MODE (SUBREG_REG (in)) == TImode
1312 && GET_CODE (SUBREG_REG (in)) == REG)
1314 rtx memt = assign_stack_temp (TImode, 16, 0);
1315 emit_move_insn (memt, SUBREG_REG (in));
1316 return adjust_address (memt, mode, 0);
1318 else if (force && GET_CODE (in) == REG)
1320 rtx memx = assign_stack_temp (mode, 16, 0);
1321 emit_move_insn (memx, in);
1322 return memx;
1324 else
1325 return in;
1328 /* Expand the movxf or movrf pattern (MODE says which) with the given
1329 OPERANDS, returning true if the pattern should then invoke
1330 DONE. */
1332 bool
1333 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1335 rtx op0 = operands[0];
1337 if (GET_CODE (op0) == SUBREG)
1338 op0 = SUBREG_REG (op0);
1340 /* We must support XFmode loads into general registers for stdarg/vararg,
1341 unprototyped calls, and a rare case where a long double is passed as
1342 an argument after a float HFA fills the FP registers. We split them into
1343 DImode loads for convenience. We also need to support XFmode stores
1344 for the last case. This case does not happen for stdarg/vararg routines,
1345 because we do a block store to memory of unnamed arguments. */
1347 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1349 rtx out[2];
1351 /* We're hoping to transform everything that deals with XFmode
1352 quantities and GR registers early in the compiler. */
1353 gcc_assert (can_create_pseudo_p ());
1355 /* Struct to register can just use TImode instead. */
1356 if ((GET_CODE (operands[1]) == SUBREG
1357 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1358 || (GET_CODE (operands[1]) == REG
1359 && GR_REGNO_P (REGNO (operands[1]))))
1361 rtx op1 = operands[1];
1363 if (GET_CODE (op1) == SUBREG)
1364 op1 = SUBREG_REG (op1);
1365 else
1366 op1 = gen_rtx_REG (TImode, REGNO (op1));
1368 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1369 return true;
1372 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1374 /* Don't word-swap when reading in the constant. */
1375 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1376 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1377 0, mode));
1378 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1379 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1380 0, mode));
1381 return true;
1384 /* If the quantity is in a register not known to be GR, spill it. */
1385 if (register_operand (operands[1], mode))
1386 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1388 gcc_assert (GET_CODE (operands[1]) == MEM);
1390 /* Don't word-swap when reading in the value. */
1391 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1392 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1394 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1395 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1396 return true;
1399 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1401 /* We're hoping to transform everything that deals with XFmode
1402 quantities and GR registers early in the compiler. */
1403 gcc_assert (can_create_pseudo_p ());
1405 /* Op0 can't be a GR_REG here, as that case is handled above.
1406 If op0 is a register, then we spill op1, so that we now have a
1407 MEM operand. This requires creating an XFmode subreg of a TImode reg
1408 to force the spill. */
1409 if (register_operand (operands[0], mode))
1411 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1412 op1 = gen_rtx_SUBREG (mode, op1, 0);
1413 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1416 else
1418 rtx in[2];
1420 gcc_assert (GET_CODE (operands[0]) == MEM);
1422 /* Don't word-swap when writing out the value. */
1423 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1424 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1426 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1427 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1428 return true;
1432 if (!reload_in_progress && !reload_completed)
1434 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1436 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1438 rtx memt, memx, in = operands[1];
1439 if (CONSTANT_P (in))
1440 in = validize_mem (force_const_mem (mode, in));
1441 if (GET_CODE (in) == MEM)
1442 memt = adjust_address (in, TImode, 0);
1443 else
1445 memt = assign_stack_temp (TImode, 16, 0);
1446 memx = adjust_address (memt, mode, 0);
1447 emit_move_insn (memx, in);
1449 emit_move_insn (op0, memt);
1450 return true;
1453 if (!ia64_move_ok (operands[0], operands[1]))
1454 operands[1] = force_reg (mode, operands[1]);
1457 return false;
1460 /* Emit comparison instruction if necessary, returning the expression
1461 that holds the compare result in the proper mode. */
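/* Library function used for the HP-UX TFmode comparison below (_U_Qfcmp);
   remains NULL when the target does not provide it.  */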
1463 static GTY(()) rtx cmptf_libfunc;
1466 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1468 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1469 rtx cmp;
1471 /* If we have a BImode input, then we already have a compare result, and
1472 do not need to emit another comparison. */
1473 if (GET_MODE (op0) == BImode)
1475 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1476 cmp = op0;
1478 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
 1479 magic number as its third argument, which indicates what to do.
1480 The return value is an integer to be compared against zero. */
1481 else if (GET_MODE (op0) == TFmode)
1483 enum qfcmp_magic {
1484 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1485 QCMP_UNORD = 2,
1486 QCMP_EQ = 4,
1487 QCMP_LT = 8,
1488 QCMP_GT = 16
1489 } magic;
1490 enum rtx_code ncode;
1491 rtx ret, insns;
1493 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1494 switch (code)
1496 /* 1 = equal, 0 = not equal. Equality operators do
1497 not raise FP_INVALID when given an SNaN operand. */
1498 case EQ: magic = QCMP_EQ; ncode = NE; break;
1499 case NE: magic = QCMP_EQ; ncode = EQ; break;
1500 /* isunordered() from C99. */
1501 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1502 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1503 /* Relational operators raise FP_INVALID when given
1504 an SNaN operand. */
1505 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1506 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1507 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1508 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1509 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
 1510 Expanders for buneq etc. would have to be added to ia64.md
1511 for this to be useful. */
1512 default: gcc_unreachable ();
1515 start_sequence ();
1517 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1518 op0, TFmode, op1, TFmode,
1519 GEN_INT (magic), DImode);
1520 cmp = gen_reg_rtx (BImode);
1521 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1522 gen_rtx_fmt_ee (ncode, BImode,
1523 ret, const0_rtx)));
1525 insns = get_insns ();
1526 end_sequence ();
1528 emit_libcall_block (insns, cmp, cmp,
1529 gen_rtx_fmt_ee (code, BImode, op0, op1));
1530 code = NE;
1532 else
1534 cmp = gen_reg_rtx (BImode);
1535 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1536 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1537 code = NE;
1540 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1543 /* Generate an integral vector comparison. Return true if the condition has
1544 been reversed, and so the sense of the comparison should be inverted. */
1546 static bool
1547 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1548 rtx dest, rtx op0, rtx op1)
1550 bool negate = false;
1551 rtx x;
1553 /* Canonicalize the comparison to EQ, GT, GTU. */
1554 switch (code)
1556 case EQ:
1557 case GT:
1558 case GTU:
1559 break;
1561 case NE:
1562 case LE:
1563 case LEU:
1564 code = reverse_condition (code);
1565 negate = true;
1566 break;
1568 case GE:
1569 case GEU:
1570 code = reverse_condition (code);
1571 negate = true;
1572 /* FALLTHRU */
1574 case LT:
1575 case LTU:
1576 code = swap_condition (code);
1577 x = op0, op0 = op1, op1 = x;
1578 break;
1580 default:
1581 gcc_unreachable ();
1584 /* Unsigned parallel compare is not supported by the hardware. Play some
1585 tricks to turn this into a signed comparison against 0. */
1586 if (code == GTU)
1588 switch (mode)
1590 case V2SImode:
1592 rtx t1, t2, mask;
1594 /* Perform a parallel modulo subtraction. */
1595 t1 = gen_reg_rtx (V2SImode);
1596 emit_insn (gen_subv2si3 (t1, op0, op1));
1598 /* Extract the original sign bit of op0. */
1599 mask = GEN_INT (-0x80000000);
1600 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1601 mask = force_reg (V2SImode, mask);
1602 t2 = gen_reg_rtx (V2SImode);
1603 emit_insn (gen_andv2si3 (t2, op0, mask));
1605 /* XOR it back into the result of the subtraction. This results
1606 in the sign bit set iff we saw unsigned underflow. */
1607 x = gen_reg_rtx (V2SImode);
1608 emit_insn (gen_xorv2si3 (x, t1, t2));
1610 code = GT;
1611 op0 = x;
1612 op1 = CONST0_RTX (mode);
1614 break;
1616 case V8QImode:
1617 case V4HImode:
1618 /* Perform a parallel unsigned saturating subtraction. */
1619 x = gen_reg_rtx (mode);
1620 emit_insn (gen_rtx_SET (VOIDmode, x,
1621 gen_rtx_US_MINUS (mode, op0, op1)));
1623 code = EQ;
1624 op0 = x;
1625 op1 = CONST0_RTX (mode);
1626 negate = !negate;
1627 break;
1629 default:
1630 gcc_unreachable ();
1634 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1635 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1637 return negate;
1640 /* Emit an integral vector conditional move. */
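/* The conditional move is open-coded as (CMP & T) | (~CMP & F); the
   special cases below simplify this when either arm is a zero constant.  */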
1642 void
1643 ia64_expand_vecint_cmov (rtx operands[])
1645 enum machine_mode mode = GET_MODE (operands[0]);
1646 enum rtx_code code = GET_CODE (operands[3]);
1647 bool negate;
1648 rtx cmp, x, ot, of;
1650 cmp = gen_reg_rtx (mode);
1651 negate = ia64_expand_vecint_compare (code, mode, cmp,
1652 operands[4], operands[5]);
1654 ot = operands[1+negate];
1655 of = operands[2-negate];
1657 if (ot == CONST0_RTX (mode))
1659 if (of == CONST0_RTX (mode))
1661 emit_move_insn (operands[0], ot);
1662 return;
1665 x = gen_rtx_NOT (mode, cmp);
1666 x = gen_rtx_AND (mode, x, of);
1667 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1669 else if (of == CONST0_RTX (mode))
1671 x = gen_rtx_AND (mode, cmp, ot);
1672 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1674 else
1676 rtx t, f;
1678 t = gen_reg_rtx (mode);
1679 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1680 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1682 f = gen_reg_rtx (mode);
1683 x = gen_rtx_NOT (mode, cmp);
1684 x = gen_rtx_AND (mode, x, operands[2-negate]);
1685 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1687 x = gen_rtx_IOR (mode, t, f);
1688 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1692 /* Emit an integral vector min or max operation. Return true if all done. */
1694 bool
1695 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1696 rtx operands[])
1698 rtx xops[6];
1700 /* These four combinations are supported directly. */
1701 if (mode == V8QImode && (code == UMIN || code == UMAX))
1702 return false;
1703 if (mode == V4HImode && (code == SMIN || code == SMAX))
1704 return false;
1706 /* This combination can be implemented with only saturating subtraction. */
1707 if (mode == V4HImode && code == UMAX)
1709 rtx x, tmp = gen_reg_rtx (mode);
1711 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1712 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1714 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1715 return true;
1718 /* Everything else implemented via vector comparisons. */
1719 xops[0] = operands[0];
1720 xops[4] = xops[1] = operands[1];
1721 xops[5] = xops[2] = operands[2];
1723 switch (code)
1725 case UMIN:
1726 code = LTU;
1727 break;
1728 case UMAX:
1729 code = GTU;
1730 break;
1731 case SMIN:
1732 code = LT;
1733 break;
1734 case SMAX:
1735 code = GT;
1736 break;
1737 default:
1738 gcc_unreachable ();
1740 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1742 ia64_expand_vecint_cmov (xops);
1743 return true;
 1746 /* Emit an integral vector widening sum operation. */
1748 void
1749 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1751 rtx l, h, x, s;
1752 enum machine_mode wmode, mode;
1753 rtx (*unpack_l) (rtx, rtx, rtx);
1754 rtx (*unpack_h) (rtx, rtx, rtx);
1755 rtx (*plus) (rtx, rtx, rtx);
1757 wmode = GET_MODE (operands[0]);
1758 mode = GET_MODE (operands[1]);
1760 switch (mode)
1762 case V8QImode:
1763 unpack_l = gen_unpack1_l;
1764 unpack_h = gen_unpack1_h;
1765 plus = gen_addv4hi3;
1766 break;
1767 case V4HImode:
1768 unpack_l = gen_unpack2_l;
1769 unpack_h = gen_unpack2_h;
1770 plus = gen_addv2si3;
1771 break;
1772 default:
1773 gcc_unreachable ();
1776 /* Fill in x with the sign extension of each element in op1. */
1777 if (unsignedp)
1778 x = CONST0_RTX (mode);
1779 else
1781 bool neg;
1783 x = gen_reg_rtx (mode);
1785 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1786 CONST0_RTX (mode));
1787 gcc_assert (!neg);
1790 l = gen_reg_rtx (wmode);
1791 h = gen_reg_rtx (wmode);
1792 s = gen_reg_rtx (wmode);
1794 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1795 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1796 emit_insn (plus (s, l, operands[2]));
1797 emit_insn (plus (operands[0], h, s));
1800 /* Emit a signed or unsigned V8QI dot product operation. */
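/* The operands are unpacked into low and high V4HImode halves (zero- or
   sign-extended as requested), multiplied pairwise with pmpy2, and the
   four V2SImode partial products are summed together with the accumulator
   in operands[3] into operands[0].  */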
1802 void
1803 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1805 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1807 /* Fill in x1 and x2 with the sign extension of each element. */
1808 if (unsignedp)
1809 x1 = x2 = CONST0_RTX (V8QImode);
1810 else
1812 bool neg;
1814 x1 = gen_reg_rtx (V8QImode);
1815 x2 = gen_reg_rtx (V8QImode);
1817 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1818 CONST0_RTX (V8QImode));
1819 gcc_assert (!neg);
1820 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1821 CONST0_RTX (V8QImode));
1822 gcc_assert (!neg);
1825 l1 = gen_reg_rtx (V4HImode);
1826 l2 = gen_reg_rtx (V4HImode);
1827 h1 = gen_reg_rtx (V4HImode);
1828 h2 = gen_reg_rtx (V4HImode);
1830 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1831 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1832 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1833 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1835 p1 = gen_reg_rtx (V2SImode);
1836 p2 = gen_reg_rtx (V2SImode);
1837 p3 = gen_reg_rtx (V2SImode);
1838 p4 = gen_reg_rtx (V2SImode);
1839 emit_insn (gen_pmpy2_r (p1, l1, l2));
1840 emit_insn (gen_pmpy2_l (p2, l1, l2));
1841 emit_insn (gen_pmpy2_r (p3, h1, h2));
1842 emit_insn (gen_pmpy2_l (p4, h1, h2));
1844 s1 = gen_reg_rtx (V2SImode);
1845 s2 = gen_reg_rtx (V2SImode);
1846 s3 = gen_reg_rtx (V2SImode);
1847 emit_insn (gen_addv2si3 (s1, p1, p2));
1848 emit_insn (gen_addv2si3 (s2, p3, p4));
1849 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1850 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1853 /* Emit the appropriate sequence for a call. */
1855 void
1856 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1857 int sibcall_p)
1859 rtx insn, b0;
1861 addr = XEXP (addr, 0);
1862 addr = convert_memory_address (DImode, addr);
1863 b0 = gen_rtx_REG (DImode, R_BR (0));
1865 /* ??? Should do this for functions known to bind local too. */
1866 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1868 if (sibcall_p)
1869 insn = gen_sibcall_nogp (addr);
1870 else if (! retval)
1871 insn = gen_call_nogp (addr, b0);
1872 else
1873 insn = gen_call_value_nogp (retval, addr, b0);
1874 insn = emit_call_insn (insn);
1876 else
1878 if (sibcall_p)
1879 insn = gen_sibcall_gp (addr);
1880 else if (! retval)
1881 insn = gen_call_gp (addr, b0);
1882 else
1883 insn = gen_call_value_gp (retval, addr, b0);
1884 insn = emit_call_insn (insn);
1886 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1889 if (sibcall_p)
1890 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1893 static void
1894 reg_emitted (enum ia64_frame_regs r)
1896 if (emitted_frame_related_regs[r] == 0)
1897 emitted_frame_related_regs[r] = current_frame_info.r[r];
1898 else
1899 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
1902 static int
1903 get_reg (enum ia64_frame_regs r)
1905 reg_emitted (r);
1906 return current_frame_info.r[r];
1909 static bool
1910 is_emitted (int regno)
1912 enum ia64_frame_regs r;
1914 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
1915 if (emitted_frame_related_regs[r] == regno)
1916 return true;
1917 return false;
1920 void
1921 ia64_reload_gp (void)
1923 rtx tmp;
1925 if (current_frame_info.r[reg_save_gp])
1927 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
1929 else
1931 HOST_WIDE_INT offset;
1932 rtx offset_r;
1934 offset = (current_frame_info.spill_cfa_off
1935 + current_frame_info.spill_size);
1936 if (frame_pointer_needed)
1938 tmp = hard_frame_pointer_rtx;
1939 offset = -offset;
1941 else
1943 tmp = stack_pointer_rtx;
1944 offset = current_frame_info.total_size - offset;
1947 offset_r = GEN_INT (offset);
1948 if (satisfies_constraint_I (offset_r))
1949 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
1950 else
1952 emit_move_insn (pic_offset_table_rtx, offset_r);
1953 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1954 pic_offset_table_rtx, tmp));
1957 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1960 emit_move_insn (pic_offset_table_rtx, tmp);
1963 void
1964 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1965 rtx scratch_b, int noreturn_p, int sibcall_p)
1967 rtx insn;
1968 bool is_desc = false;
1970 /* If we find we're calling through a register, then we're actually
1971 calling through a descriptor, so load up the values. */
1972 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1974 rtx tmp;
1975 bool addr_dead_p;
1977 /* ??? We are currently constrained to *not* use peep2, because
1978 we can legitimately change the global lifetime of the GP
1979 (in the form of killing where previously live). This is
1980 because a call through a descriptor doesn't use the previous
1981 value of the GP, while a direct call does, and we do not
1982 commit to either form until the split here.
1984 That said, this means that we lack precise life info for
1985 whether ADDR is dead after this call. This is not terribly
1986 important, since we can fix things up essentially for free
1987 with the POST_DEC below, but it's nice to not use it when we
1988 can immediately tell it's not necessary. */
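/* For reference: a call through a general register means the pointer is
   a function descriptor, whose first doubleword holds the code address
   and whose second holds the GP value; that is why the code address is
   loaded first through a POST_INC of ADDR and the GP value is loaded
   from ADDR afterwards.  */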
1989 addr_dead_p = ((noreturn_p || sibcall_p
1990 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1991 REGNO (addr)))
1992 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1994 /* Load the code address into scratch_b. */
1995 tmp = gen_rtx_POST_INC (Pmode, addr);
1996 tmp = gen_rtx_MEM (Pmode, tmp);
1997 emit_move_insn (scratch_r, tmp);
1998 emit_move_insn (scratch_b, scratch_r);
2000 /* Load the GP address. If ADDR is not dead here, then we must
2001 revert the change made above via the POST_INCREMENT. */
2002 if (!addr_dead_p)
2003 tmp = gen_rtx_POST_DEC (Pmode, addr);
2004 else
2005 tmp = addr;
2006 tmp = gen_rtx_MEM (Pmode, tmp);
2007 emit_move_insn (pic_offset_table_rtx, tmp);
2009 is_desc = true;
2010 addr = scratch_b;
2013 if (sibcall_p)
2014 insn = gen_sibcall_nogp (addr);
2015 else if (retval)
2016 insn = gen_call_value_nogp (retval, addr, retaddr);
2017 else
2018 insn = gen_call_nogp (addr, retaddr);
2019 emit_call_insn (insn);
2021 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2022 ia64_reload_gp ();
2025 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2027 This differs from the generic code in that we know about the zero-extending
2028 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2029 also know that ld.acq+cmpxchg.rel equals a full barrier.
2031 The loop we want to generate looks like
2033 cmp_reg = mem;
2034 label:
2035 old_reg = cmp_reg;
2036 new_reg = cmp_reg op val;
2037 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2038 if (cmp_reg != old_reg)
2039 goto label;
2041 Note that we only do the plain load from memory once. Subsequent
2042 iterations use the value loaded by the compare-and-swap pattern. */
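/* Illustrative example: adding an immediate such as 1 to an SImode
   location can take the fetchadd fast path below (assuming the constant
   satisfies fetchadd_operand), while an AND, IOR, or XOR always falls
   through to the ar.ccv/cmpxchg loop sketched above.  */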
2044 void
2045 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2046 rtx old_dst, rtx new_dst)
2048 enum machine_mode mode = GET_MODE (mem);
2049 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2050 enum insn_code icode;
2052 /* Special case for using fetchadd. */
2053 if ((mode == SImode || mode == DImode)
2054 && (code == PLUS || code == MINUS)
2055 && fetchadd_operand (val, mode))
2057 if (code == MINUS)
2058 val = GEN_INT (-INTVAL (val));
2060 if (!old_dst)
2061 old_dst = gen_reg_rtx (mode);
2063 emit_insn (gen_memory_barrier ());
2065 if (mode == SImode)
2066 icode = CODE_FOR_fetchadd_acq_si;
2067 else
2068 icode = CODE_FOR_fetchadd_acq_di;
2069 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2071 if (new_dst)
2073 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2074 true, OPTAB_WIDEN);
2075 if (new_reg != new_dst)
2076 emit_move_insn (new_dst, new_reg);
2078 return;
2081 /* Because of the volatile mem read, we get an ld.acq, which is the
2082 front half of the full barrier. The end half is the cmpxchg.rel. */
2083 gcc_assert (MEM_VOLATILE_P (mem));
2085 old_reg = gen_reg_rtx (DImode);
2086 cmp_reg = gen_reg_rtx (DImode);
2087 label = gen_label_rtx ();
2089 if (mode != DImode)
2091 val = simplify_gen_subreg (DImode, val, mode, 0);
2092 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2094 else
2095 emit_move_insn (cmp_reg, mem);
2097 emit_label (label);
2099 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2100 emit_move_insn (old_reg, cmp_reg);
2101 emit_move_insn (ar_ccv, cmp_reg);
2103 if (old_dst)
2104 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2106 new_reg = cmp_reg;
2107 if (code == NOT)
2109 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2110 code = AND;
2112 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2113 true, OPTAB_DIRECT);
2115 if (mode != DImode)
2116 new_reg = gen_lowpart (mode, new_reg);
2117 if (new_dst)
2118 emit_move_insn (new_dst, new_reg);
2120 switch (mode)
2122 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2123 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2124 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2125 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2126 default:
2127 gcc_unreachable ();
2130 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2132 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2135 /* Begin the assembly file. */
2137 static void
2138 ia64_file_start (void)
2140 /* Variable tracking should be run after all optimizations which change order
2141 of insns. It also needs a valid CFG. This can't be done in
2142 ia64_override_options, because flag_var_tracking is finalized after
2143 that. */
2144 ia64_flag_var_tracking = flag_var_tracking;
2145 flag_var_tracking = 0;
2147 default_file_start ();
2148 emit_safe_across_calls ();
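/* Emit a .pred.safe_across_calls directive listing the ranges of
   predicate registers that are not call-used, e.g. something like
   "p1-p5,p16-p63" under the default register conventions (the exact
   ranges depend on call_used_regs).  */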
2151 void
2152 emit_safe_across_calls (void)
2154 unsigned int rs, re;
2155 int out_state;
2157 rs = 1;
2158 out_state = 0;
2159 while (1)
2161 while (rs < 64 && call_used_regs[PR_REG (rs)])
2162 rs++;
2163 if (rs >= 64)
2164 break;
2165 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2166 continue;
2167 if (out_state == 0)
2169 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2170 out_state = 1;
2172 else
2173 fputc (',', asm_out_file);
2174 if (re == rs + 1)
2175 fprintf (asm_out_file, "p%u", rs);
2176 else
2177 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2178 rs = re + 1;
2180 if (out_state)
2181 fputc ('\n', asm_out_file);
2184 /* Globalize a declaration. */
2186 static void
2187 ia64_globalize_decl_name (FILE * stream, tree decl)
2189 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2190 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2191 if (version_attr)
2193 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2194 const char *p = TREE_STRING_POINTER (v);
2195 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2197 targetm.asm_out.globalize_label (stream, name);
2198 if (TREE_CODE (decl) == FUNCTION_DECL)
2199 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2202 /* Helper function for ia64_compute_frame_size: find an appropriate general
2203 register to spill some special register to.  current_frame_info.gr_used_mask
2204 records which of GR0 to GR31 have already been allocated by this routine.
2205 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2207 static int
2208 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2210 int regno;
2212 if (emitted_frame_related_regs[r] != 0)
2214 regno = emitted_frame_related_regs[r];
2215 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2216 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2217 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2218 else if (current_function_is_leaf
2219 && regno >= GR_REG (1) && regno <= GR_REG (31))
2220 current_frame_info.gr_used_mask |= 1 << regno;
2222 return regno;
2225 /* If this is a leaf function, first try an otherwise unused
2226 call-clobbered register. */
2227 if (current_function_is_leaf)
2229 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2230 if (! df_regs_ever_live_p (regno)
2231 && call_used_regs[regno]
2232 && ! fixed_regs[regno]
2233 && ! global_regs[regno]
2234 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2235 && ! is_emitted (regno))
2237 current_frame_info.gr_used_mask |= 1 << regno;
2238 return regno;
2242 if (try_locals)
2244 regno = current_frame_info.n_local_regs;
2245 /* If there is a frame pointer, then we can't use loc79, because
2246 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2247 reg_name switching code in ia64_expand_prologue. */
2248 while (regno < (80 - frame_pointer_needed))
2249 if (! is_emitted (LOC_REG (regno++)))
2251 current_frame_info.n_local_regs = regno;
2252 return LOC_REG (regno - 1);
2256 /* Failed to find a general register to spill to. Must use stack. */
2257 return 0;
2260 /* In order to make for nice schedules, we try to allocate every temporary
2261 to a different register. We must of course stay away from call-saved,
2262 fixed, and global registers. We must also stay away from registers
2263 allocated in current_frame_info.gr_used_mask, since those include regs
2264 used all through the prologue.
2266 Any register allocated here must be used immediately. The idea is to
2267 aid scheduling, not to solve data flow problems. */
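/* next_scratch_gr_reg below simply walks r0-r31 round-robin, starting
   just past the last register it handed out and skipping fixed, global,
   call-saved, and already-reserved registers.  */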
2269 static int last_scratch_gr_reg;
2271 static int
2272 next_scratch_gr_reg (void)
2274 int i, regno;
2276 for (i = 0; i < 32; ++i)
2278 regno = (last_scratch_gr_reg + i + 1) & 31;
2279 if (call_used_regs[regno]
2280 && ! fixed_regs[regno]
2281 && ! global_regs[regno]
2282 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2284 last_scratch_gr_reg = regno;
2285 return regno;
2289 /* There must be _something_ available. */
2290 gcc_unreachable ();
2293 /* Helper function for ia64_compute_frame_size, called through
2294 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2296 static void
2297 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2299 unsigned int regno = REGNO (reg);
2300 if (regno < 32)
2302 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2303 for (i = 0; i < n; ++i)
2304 current_frame_info.gr_used_mask |= 1 << (regno + i);
2309 /* Compute the frame layout for the current function and record it in
2310 current_frame_info.  SIZE is the number of bytes of space needed for
2311 local variables. */
2313 static void
2314 ia64_compute_frame_size (HOST_WIDE_INT size)
2316 HOST_WIDE_INT total_size;
2317 HOST_WIDE_INT spill_size = 0;
2318 HOST_WIDE_INT extra_spill_size = 0;
2319 HOST_WIDE_INT pretend_args_size;
2320 HARD_REG_SET mask;
2321 int n_spilled = 0;
2322 int spilled_gr_p = 0;
2323 int spilled_fr_p = 0;
2324 unsigned int regno;
2325 int min_regno;
2326 int max_regno;
2327 int i;
2329 if (current_frame_info.initialized)
2330 return;
2332 memset (&current_frame_info, 0, sizeof current_frame_info);
2333 CLEAR_HARD_REG_SET (mask);
2335 /* Don't allocate scratches to the return register. */
2336 diddle_return_value (mark_reg_gr_used_mask, NULL);
2338 /* Don't allocate scratches to the EH scratch registers. */
2339 if (cfun->machine->ia64_eh_epilogue_sp)
2340 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2341 if (cfun->machine->ia64_eh_epilogue_bsp)
2342 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2344 /* Find the size of the register stack frame. We have only 80 local
2345 registers, because we reserve 8 for the inputs and 8 for the
2346 outputs. */
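/* Worked arithmetic: r32-r127 provide 96 stacked registers; reserving 8
   for inputs and 8 for outputs leaves at most 80 that can be named as
   locals.  */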
2348 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2349 since we'll be adjusting that down later. */
2350 regno = LOC_REG (78) + ! frame_pointer_needed;
2351 for (; regno >= LOC_REG (0); regno--)
2352 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2353 break;
2354 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2356 /* For functions marked with the syscall_linkage attribute, we must mark
2357 all eight input registers as in use, so that locals aren't visible to
2358 the caller. */
2360 if (cfun->machine->n_varargs > 0
2361 || lookup_attribute ("syscall_linkage",
2362 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2363 current_frame_info.n_input_regs = 8;
2364 else
2366 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2367 if (df_regs_ever_live_p (regno))
2368 break;
2369 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2372 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2373 if (df_regs_ever_live_p (regno))
2374 break;
2375 i = regno - OUT_REG (0) + 1;
2377 #ifndef PROFILE_HOOK
2378 /* When -p profiling, we need one output register for the mcount argument.
2379 Likewise for -a profiling for the bb_init_func argument. For -ax
2380 profiling, we need two output registers for the two bb_init_trace_func
2381 arguments. */
2382 if (current_function_profile)
2383 i = MAX (i, 1);
2384 #endif
2385 current_frame_info.n_output_regs = i;
2387 /* ??? No rotating register support yet. */
2388 current_frame_info.n_rotate_regs = 0;
2390 /* Discover which registers need spilling, and how much room that
2391 will take. Begin with floating point and general registers,
2392 which will always wind up on the stack. */
2394 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2395 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2397 SET_HARD_REG_BIT (mask, regno);
2398 spill_size += 16;
2399 n_spilled += 1;
2400 spilled_fr_p = 1;
2403 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2404 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2406 SET_HARD_REG_BIT (mask, regno);
2407 spill_size += 8;
2408 n_spilled += 1;
2409 spilled_gr_p = 1;
2412 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2413 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2415 SET_HARD_REG_BIT (mask, regno);
2416 spill_size += 8;
2417 n_spilled += 1;
2420 /* Now come all special registers that might get saved in other
2421 general registers. */
2423 if (frame_pointer_needed)
2425 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2426 /* If we did not get a register, then we take LOC79. This is guaranteed
2427 to be free, even if regs_ever_live is already set, because this is
2428 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2429 as we don't count loc79 above. */
2430 if (current_frame_info.r[reg_fp] == 0)
2432 current_frame_info.r[reg_fp] = LOC_REG (79);
2433 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2437 if (! current_function_is_leaf)
2439 /* Emit a save of BR0 if we call other functions. Do this even
2440 if this function doesn't return, as EH depends on this to be
2441 able to unwind the stack. */
2442 SET_HARD_REG_BIT (mask, BR_REG (0));
2444 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2445 if (current_frame_info.r[reg_save_b0] == 0)
2447 extra_spill_size += 8;
2448 n_spilled += 1;
2451 /* Similarly for ar.pfs. */
2452 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2453 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2454 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2456 extra_spill_size += 8;
2457 n_spilled += 1;
2460 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2461 registers are clobbered, so we fall back to the stack. */
2462 current_frame_info.r[reg_save_gp]
2463 = (current_function_calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2464 if (current_frame_info.r[reg_save_gp] == 0)
2466 SET_HARD_REG_BIT (mask, GR_REG (1));
2467 spill_size += 8;
2468 n_spilled += 1;
2471 else
2473 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2475 SET_HARD_REG_BIT (mask, BR_REG (0));
2476 extra_spill_size += 8;
2477 n_spilled += 1;
2480 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2482 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2483 current_frame_info.r[reg_save_ar_pfs]
2484 = find_gr_spill (reg_save_ar_pfs, 1);
2485 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2487 extra_spill_size += 8;
2488 n_spilled += 1;
2493 /* Unwind descriptor hackery: things are most efficient if we allocate
2494 consecutive GR save registers for RP, PFS, FP in that order. However,
2495 it is absolutely critical that FP get the only hard register that's
2496 guaranteed to be free, so we allocated it first. If all three did
2497 happen to be allocated hard regs, and are consecutive, rearrange them
2498 into the preferred order now.
2500 If we have already emitted code for any of those registers,
2501 then it's already too late to change. */
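/* Hypothetical example: if the three saves happened to land in r35, r33,
   and r34, the block below permutes them so that B0 uses r33, AR.PFS r34,
   and FP r35, the order the unwind descriptors prefer.  */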
2502 min_regno = MIN (current_frame_info.r[reg_fp],
2503 MIN (current_frame_info.r[reg_save_b0],
2504 current_frame_info.r[reg_save_ar_pfs]));
2505 max_regno = MAX (current_frame_info.r[reg_fp],
2506 MAX (current_frame_info.r[reg_save_b0],
2507 current_frame_info.r[reg_save_ar_pfs]));
2508 if (min_regno > 0
2509 && min_regno + 2 == max_regno
2510 && (current_frame_info.r[reg_fp] == min_regno + 1
2511 || current_frame_info.r[reg_save_b0] == min_regno + 1
2512 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2513 && (emitted_frame_related_regs[reg_save_b0] == 0
2514 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2515 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2516 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2517 && (emitted_frame_related_regs[reg_fp] == 0
2518 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2520 current_frame_info.r[reg_save_b0] = min_regno;
2521 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2522 current_frame_info.r[reg_fp] = min_regno + 2;
2525 /* See if we need to store the predicate register block. */
2526 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2527 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2528 break;
2529 if (regno <= PR_REG (63))
2531 SET_HARD_REG_BIT (mask, PR_REG (0));
2532 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2533 if (current_frame_info.r[reg_save_pr] == 0)
2535 extra_spill_size += 8;
2536 n_spilled += 1;
2539 /* ??? Mark them all as used so that register renaming and such
2540 are free to use them. */
2541 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2542 df_set_regs_ever_live (regno, true);
2545 /* If we're forced to use st8.spill, we're forced to save and restore
2546 ar.unat as well. The check for existing liveness allows inline asm
2547 to touch ar.unat. */
2548 if (spilled_gr_p || cfun->machine->n_varargs
2549 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2551 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2552 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2553 current_frame_info.r[reg_save_ar_unat]
2554 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2555 if (current_frame_info.r[reg_save_ar_unat] == 0)
2557 extra_spill_size += 8;
2558 n_spilled += 1;
2562 if (df_regs_ever_live_p (AR_LC_REGNUM))
2564 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2565 current_frame_info.r[reg_save_ar_lc]
2566 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2567 if (current_frame_info.r[reg_save_ar_lc] == 0)
2569 extra_spill_size += 8;
2570 n_spilled += 1;
2574 /* If we have an odd number of words of pretend arguments written to
2575 the stack, then the FR save area will be unaligned. We round the
2576 size of this area up to keep things 16 byte aligned. */
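/* Illustrative example: with FR spills present and, say, 8 bytes of
   pretend arguments, IA64_STACK_ALIGN rounds that 8 up to the next
   16-byte boundary so the 16-byte FR spill slots that follow stay
   aligned.  */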
2577 if (spilled_fr_p)
2578 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2579 else
2580 pretend_args_size = crtl->args.pretend_args_size;
2582 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2583 + crtl->outgoing_args_size);
2584 total_size = IA64_STACK_ALIGN (total_size);
2586 /* We always use the 16-byte scratch area provided by the caller, but
2587 if we are a leaf function, there's no one to which we need to provide
2588 a scratch area. */
2589 if (current_function_is_leaf)
2590 total_size = MAX (0, total_size - 16);
2592 current_frame_info.total_size = total_size;
2593 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2594 current_frame_info.spill_size = spill_size;
2595 current_frame_info.extra_spill_size = extra_spill_size;
2596 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2597 current_frame_info.n_spilled = n_spilled;
2598 current_frame_info.initialized = reload_completed;
2601 /* Compute the initial difference between the specified pair of registers. */
2603 HOST_WIDE_INT
2604 ia64_initial_elimination_offset (int from, int to)
2606 HOST_WIDE_INT offset;
2608 ia64_compute_frame_size (get_frame_size ());
2609 switch (from)
2611 case FRAME_POINTER_REGNUM:
2612 switch (to)
2614 case HARD_FRAME_POINTER_REGNUM:
2615 if (current_function_is_leaf)
2616 offset = -current_frame_info.total_size;
2617 else
2618 offset = -(current_frame_info.total_size
2619 - crtl->outgoing_args_size - 16);
2620 break;
2622 case STACK_POINTER_REGNUM:
2623 if (current_function_is_leaf)
2624 offset = 0;
2625 else
2626 offset = 16 + crtl->outgoing_args_size;
2627 break;
2629 default:
2630 gcc_unreachable ();
2632 break;
2634 case ARG_POINTER_REGNUM:
2635 /* Arguments start above the 16 byte save area, unless stdarg,
2636 in which case we store through the 16 byte save area. */
2637 switch (to)
2639 case HARD_FRAME_POINTER_REGNUM:
2640 offset = 16 - crtl->args.pretend_args_size;
2641 break;
2643 case STACK_POINTER_REGNUM:
2644 offset = (current_frame_info.total_size
2645 + 16 - crtl->args.pretend_args_size);
2646 break;
2648 default:
2649 gcc_unreachable ();
2651 break;
2653 default:
2654 gcc_unreachable ();
2657 return offset;
2660 /* If there are more than a trivial number of register spills, we use
2661 two interleaved iterators so that we can get two memory references
2662 per insn group.
2664 In order to simplify things in the prologue and epilogue expanders,
2665 we use helper functions to fix up the memory references after the
2666 fact with the appropriate offsets to a POST_MODIFY memory mode.
2667 The following data structure tracks the state of the two iterators
2668 while insns are being emitted. */
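/* Note: with more than two spills, n_iter becomes 2 and the spill/fill
   helpers alternate between the two iterator registers, which is what
   lets consecutive saves land in the same insn group.  */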
2670 struct spill_fill_data
2672 rtx init_after; /* point at which to emit initializations */
2673 rtx init_reg[2]; /* initial base register */
2674 rtx iter_reg[2]; /* the iterator registers */
2675 rtx *prev_addr[2]; /* address of last memory use */
2676 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2677 HOST_WIDE_INT prev_off[2]; /* last offset */
2678 int n_iter; /* number of iterators in use */
2679 int next_iter; /* next iterator to use */
2680 unsigned int save_gr_used_mask;
2683 static struct spill_fill_data spill_fill_data;
2685 static void
2686 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2688 int i;
2690 spill_fill_data.init_after = get_last_insn ();
2691 spill_fill_data.init_reg[0] = init_reg;
2692 spill_fill_data.init_reg[1] = init_reg;
2693 spill_fill_data.prev_addr[0] = NULL;
2694 spill_fill_data.prev_addr[1] = NULL;
2695 spill_fill_data.prev_insn[0] = NULL;
2696 spill_fill_data.prev_insn[1] = NULL;
2697 spill_fill_data.prev_off[0] = cfa_off;
2698 spill_fill_data.prev_off[1] = cfa_off;
2699 spill_fill_data.next_iter = 0;
2700 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2702 spill_fill_data.n_iter = 1 + (n_spills > 2);
2703 for (i = 0; i < spill_fill_data.n_iter; ++i)
2705 int regno = next_scratch_gr_reg ();
2706 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2707 current_frame_info.gr_used_mask |= 1 << regno;
2711 static void
2712 finish_spill_pointers (void)
2714 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2717 static rtx
2718 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2720 int iter = spill_fill_data.next_iter;
2721 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2722 rtx disp_rtx = GEN_INT (disp);
2723 rtx mem;
2725 if (spill_fill_data.prev_addr[iter])
2727 if (satisfies_constraint_N (disp_rtx))
2729 *spill_fill_data.prev_addr[iter]
2730 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2731 gen_rtx_PLUS (DImode,
2732 spill_fill_data.iter_reg[iter],
2733 disp_rtx));
2734 REG_NOTES (spill_fill_data.prev_insn[iter])
2735 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2736 REG_NOTES (spill_fill_data.prev_insn[iter]));
2738 else
2740 /* ??? Could use register post_modify for loads. */
2741 if (!satisfies_constraint_I (disp_rtx))
2743 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2744 emit_move_insn (tmp, disp_rtx);
2745 disp_rtx = tmp;
2747 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2748 spill_fill_data.iter_reg[iter], disp_rtx));
2751 /* Micro-optimization: if we've created a frame pointer, it's at
2752 CFA 0, which may allow the real iterator to be initialized lower,
2753 slightly increasing parallelism. Also, if there are few saves
2754 it may eliminate the iterator entirely. */
2755 else if (disp == 0
2756 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2757 && frame_pointer_needed)
2759 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2760 set_mem_alias_set (mem, get_varargs_alias_set ());
2761 return mem;
2763 else
2765 rtx seq, insn;
2767 if (disp == 0)
2768 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2769 spill_fill_data.init_reg[iter]);
2770 else
2772 start_sequence ();
2774 if (!satisfies_constraint_I (disp_rtx))
2776 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2777 emit_move_insn (tmp, disp_rtx);
2778 disp_rtx = tmp;
2781 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2782 spill_fill_data.init_reg[iter],
2783 disp_rtx));
2785 seq = get_insns ();
2786 end_sequence ();
2789 /* Be careful in case this is the first insn in a sequence. */
2790 if (spill_fill_data.init_after)
2791 insn = emit_insn_after (seq, spill_fill_data.init_after);
2792 else
2794 rtx first = get_insns ();
2795 if (first)
2796 insn = emit_insn_before (seq, first);
2797 else
2798 insn = emit_insn (seq);
2800 spill_fill_data.init_after = insn;
2803 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2805 /* ??? Not all of the spills are for varargs, but some of them are.
2806 The rest of the spills belong in an alias set of their own. But
2807 it doesn't actually hurt to include them here. */
2808 set_mem_alias_set (mem, get_varargs_alias_set ());
2810 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2811 spill_fill_data.prev_off[iter] = cfa_off;
2813 if (++iter >= spill_fill_data.n_iter)
2814 iter = 0;
2815 spill_fill_data.next_iter = iter;
2817 return mem;
2820 static void
2821 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2822 rtx frame_reg)
2824 int iter = spill_fill_data.next_iter;
2825 rtx mem, insn;
2827 mem = spill_restore_mem (reg, cfa_off);
2828 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2829 spill_fill_data.prev_insn[iter] = insn;
2831 if (frame_reg)
2833 rtx base;
2834 HOST_WIDE_INT off;
2836 RTX_FRAME_RELATED_P (insn) = 1;
2838 /* Don't even pretend that the unwind code can intuit its way
2839 through a pair of interleaved post_modify iterators. Just
2840 provide the correct answer. */
2842 if (frame_pointer_needed)
2844 base = hard_frame_pointer_rtx;
2845 off = - cfa_off;
2847 else
2849 base = stack_pointer_rtx;
2850 off = current_frame_info.total_size - cfa_off;
2853 REG_NOTES (insn)
2854 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2855 gen_rtx_SET (VOIDmode,
2856 gen_rtx_MEM (GET_MODE (reg),
2857 plus_constant (base, off)),
2858 frame_reg),
2859 REG_NOTES (insn));
2863 static void
2864 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2866 int iter = spill_fill_data.next_iter;
2867 rtx insn;
2869 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2870 GEN_INT (cfa_off)));
2871 spill_fill_data.prev_insn[iter] = insn;
2874 /* Wrapper functions that discard the CONST_INT spill offset.  These
2875 exist so that we can give gr_spill/gr_fill the offset they need and
2876 use a consistent function interface. */
2878 static rtx
2879 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2881 return gen_movdi (dest, src);
2884 static rtx
2885 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2887 return gen_fr_spill (dest, src);
2890 static rtx
2891 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2893 return gen_fr_restore (dest, src);
2896 /* Called after register allocation to add any instructions needed for the
2897 prologue. Using a prologue insn is favored compared to putting all of the
2898 instructions in output_function_prologue(), since it allows the scheduler
2899 to intermix instructions with the saves of the call-saved registers. In
2900 some cases, it might be necessary to emit a barrier instruction as the last
2901 insn to prevent such scheduling.
2903 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2904 so that the debug info generation code can handle them properly.
2906 The register save area is laid out like so:
2907 cfa+16
2908 [ varargs spill area ]
2909 [ fr register spill area ]
2910 [ br register spill area ]
2911 [ ar register spill area ]
2912 [ pr register spill area ]
2913 [ gr register spill area ] */
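/* Illustrative walk-through: the varargs registers are spilled first
   (cfa_off starts at -16 and moves up by 8 per register), then cfa_off
   is reset to spill_cfa_off + spill_size + extra_spill_size and stepped
   down (8 bytes per pr/ar/br/gr slot, 16 per fr slot) until it reaches
   spill_cfa_off.  */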
2915 /* ??? Get inefficient code when the frame size is larger than can fit in an
2916 adds instruction. */
2918 void
2919 ia64_expand_prologue (void)
2921 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2922 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2923 rtx reg, alt_reg;
2925 ia64_compute_frame_size (get_frame_size ());
2926 last_scratch_gr_reg = 15;
2928 if (dump_file)
2930 fprintf (dump_file, "ia64 frame related registers "
2931 "recorded in current_frame_info.r[]:\n");
2932 #define PRINTREG(a) if (current_frame_info.r[a]) \
2933 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
2934 PRINTREG(reg_fp);
2935 PRINTREG(reg_save_b0);
2936 PRINTREG(reg_save_pr);
2937 PRINTREG(reg_save_ar_pfs);
2938 PRINTREG(reg_save_ar_unat);
2939 PRINTREG(reg_save_ar_lc);
2940 PRINTREG(reg_save_gp);
2941 #undef PRINTREG
2944 /* If there is no epilogue, then we don't need some prologue insns.
2945 We need to avoid emitting the dead prologue insns, because flow
2946 will complain about them. */
2947 if (optimize)
2949 edge e;
2950 edge_iterator ei;
2952 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2953 if ((e->flags & EDGE_FAKE) == 0
2954 && (e->flags & EDGE_FALLTHRU) != 0)
2955 break;
2956 epilogue_p = (e != NULL);
2958 else
2959 epilogue_p = 1;
2961 /* Set the local, input, and output register names. We need to do this
2962 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2963 half. If we use in/loc/out register names, then we get assembler errors
2964 in crtn.S because there is no alloc insn or regstk directive in there. */
2965 if (! TARGET_REG_NAMES)
2967 int inputs = current_frame_info.n_input_regs;
2968 int locals = current_frame_info.n_local_regs;
2969 int outputs = current_frame_info.n_output_regs;
2971 for (i = 0; i < inputs; i++)
2972 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2973 for (i = 0; i < locals; i++)
2974 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2975 for (i = 0; i < outputs; i++)
2976 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2979 /* Set the frame pointer register name. The regnum is logically loc79,
2980 but of course we'll not have allocated that many locals. Rather than
2981 worrying about renumbering the existing rtxs, we adjust the name. */
2982 /* ??? This code means that we can never use one local register when
2983 there is a frame pointer. loc79 gets wasted in this case, as it is
2984 renamed to a register that will never be used. See also the try_locals
2985 code in find_gr_spill. */
2986 if (current_frame_info.r[reg_fp])
2988 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2989 reg_names[HARD_FRAME_POINTER_REGNUM]
2990 = reg_names[current_frame_info.r[reg_fp]];
2991 reg_names[current_frame_info.r[reg_fp]] = tmp;
2994 /* We don't need an alloc instruction if we've used no outputs or locals. */
2995 if (current_frame_info.n_local_regs == 0
2996 && current_frame_info.n_output_regs == 0
2997 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
2998 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3000 /* If there is no alloc, but there are input registers used, then we
3001 need a .regstk directive. */
3002 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3003 ar_pfs_save_reg = NULL_RTX;
3005 else
3007 current_frame_info.need_regstk = 0;
3009 if (current_frame_info.r[reg_save_ar_pfs])
3011 regno = current_frame_info.r[reg_save_ar_pfs];
3012 reg_emitted (reg_save_ar_pfs);
3014 else
3015 regno = next_scratch_gr_reg ();
3016 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3018 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3019 GEN_INT (current_frame_info.n_input_regs),
3020 GEN_INT (current_frame_info.n_local_regs),
3021 GEN_INT (current_frame_info.n_output_regs),
3022 GEN_INT (current_frame_info.n_rotate_regs)));
3023 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3026 /* Set up frame pointer, stack pointer, and spill iterators. */
3028 n_varargs = cfun->machine->n_varargs;
3029 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3030 stack_pointer_rtx, 0);
3032 if (frame_pointer_needed)
3034 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3035 RTX_FRAME_RELATED_P (insn) = 1;
3038 if (current_frame_info.total_size != 0)
3040 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3041 rtx offset;
3043 if (satisfies_constraint_I (frame_size_rtx))
3044 offset = frame_size_rtx;
3045 else
3047 regno = next_scratch_gr_reg ();
3048 offset = gen_rtx_REG (DImode, regno);
3049 emit_move_insn (offset, frame_size_rtx);
3052 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3053 stack_pointer_rtx, offset));
3055 if (! frame_pointer_needed)
3057 RTX_FRAME_RELATED_P (insn) = 1;
3058 if (GET_CODE (offset) != CONST_INT)
3060 REG_NOTES (insn)
3061 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3062 gen_rtx_SET (VOIDmode,
3063 stack_pointer_rtx,
3064 gen_rtx_PLUS (DImode,
3065 stack_pointer_rtx,
3066 frame_size_rtx)),
3067 REG_NOTES (insn));
3071 /* ??? At this point we must generate a magic insn that appears to
3072 modify the stack pointer, the frame pointer, and all spill
3073 iterators. This would allow the most scheduling freedom. For
3074 now, just hard stop. */
3075 emit_insn (gen_blockage ());
3078 /* Must copy out ar.unat before doing any integer spills. */
3079 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3081 if (current_frame_info.r[reg_save_ar_unat])
3083 ar_unat_save_reg
3084 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3085 reg_emitted (reg_save_ar_unat);
3087 else
3089 alt_regno = next_scratch_gr_reg ();
3090 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3091 current_frame_info.gr_used_mask |= 1 << alt_regno;
3094 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3095 insn = emit_move_insn (ar_unat_save_reg, reg);
3096 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_unat] != 0);
3098 /* Even if we're not going to generate an epilogue, we still
3099 need to save the register so that EH works. */
3100 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3101 emit_insn (gen_prologue_use (ar_unat_save_reg));
3103 else
3104 ar_unat_save_reg = NULL_RTX;
3106 /* Spill all varargs registers. Do this before spilling any GR registers,
3107 since we want the UNAT bits for the GR registers to override the UNAT
3108 bits from varargs, which we don't care about. */
3110 cfa_off = -16;
3111 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3113 reg = gen_rtx_REG (DImode, regno);
3114 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3117 /* Locate the bottom of the register save area. */
3118 cfa_off = (current_frame_info.spill_cfa_off
3119 + current_frame_info.spill_size
3120 + current_frame_info.extra_spill_size);
3122 /* Save the predicate register block either in a register or in memory. */
3123 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3125 reg = gen_rtx_REG (DImode, PR_REG (0));
3126 if (current_frame_info.r[reg_save_pr] != 0)
3128 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3129 reg_emitted (reg_save_pr);
3130 insn = emit_move_insn (alt_reg, reg);
3132 /* ??? Denote pr spill/fill by a DImode move that modifies all
3133 64 hard registers. */
3134 RTX_FRAME_RELATED_P (insn) = 1;
3135 REG_NOTES (insn)
3136 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3137 gen_rtx_SET (VOIDmode, alt_reg, reg),
3138 REG_NOTES (insn));
3140 /* Even if we're not going to generate an epilogue, we still
3141 need to save the register so that EH works. */
3142 if (! epilogue_p)
3143 emit_insn (gen_prologue_use (alt_reg));
3145 else
3147 alt_regno = next_scratch_gr_reg ();
3148 alt_reg = gen_rtx_REG (DImode, alt_regno);
3149 insn = emit_move_insn (alt_reg, reg);
3150 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3151 cfa_off -= 8;
3155 /* Handle AR regs in numerical order. All of them get special handling. */
3156 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3157 && current_frame_info.r[reg_save_ar_unat] == 0)
3159 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3160 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3161 cfa_off -= 8;
3164 /* The alloc insn already copied ar.pfs into a general register. The
3165 only thing we have to do now is copy that register to a stack slot
3166 if we'd not allocated a local register for the job. */
3167 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3168 && current_frame_info.r[reg_save_ar_pfs] == 0)
3170 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3171 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3172 cfa_off -= 8;
3175 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3177 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3178 if (current_frame_info.r[reg_save_ar_lc] != 0)
3180 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3181 reg_emitted (reg_save_ar_lc);
3182 insn = emit_move_insn (alt_reg, reg);
3183 RTX_FRAME_RELATED_P (insn) = 1;
3185 /* Even if we're not going to generate an epilogue, we still
3186 need to save the register so that EH works. */
3187 if (! epilogue_p)
3188 emit_insn (gen_prologue_use (alt_reg));
3190 else
3192 alt_regno = next_scratch_gr_reg ();
3193 alt_reg = gen_rtx_REG (DImode, alt_regno);
3194 emit_move_insn (alt_reg, reg);
3195 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3196 cfa_off -= 8;
3200 /* Save the return pointer. */
3201 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3203 reg = gen_rtx_REG (DImode, BR_REG (0));
3204 if (current_frame_info.r[reg_save_b0] != 0)
3206 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3207 reg_emitted (reg_save_b0);
3208 insn = emit_move_insn (alt_reg, reg);
3209 RTX_FRAME_RELATED_P (insn) = 1;
3211 /* Even if we're not going to generate an epilogue, we still
3212 need to save the register so that EH works. */
3213 if (! epilogue_p)
3214 emit_insn (gen_prologue_use (alt_reg));
3216 else
3218 alt_regno = next_scratch_gr_reg ();
3219 alt_reg = gen_rtx_REG (DImode, alt_regno);
3220 emit_move_insn (alt_reg, reg);
3221 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3222 cfa_off -= 8;
3226 if (current_frame_info.r[reg_save_gp])
3228 reg_emitted (reg_save_gp);
3229 insn = emit_move_insn (gen_rtx_REG (DImode,
3230 current_frame_info.r[reg_save_gp]),
3231 pic_offset_table_rtx);
3234 /* We should now be at the base of the gr/br/fr spill area. */
3235 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3236 + current_frame_info.spill_size));
3238 /* Spill all general registers. */
3239 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3240 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3242 reg = gen_rtx_REG (DImode, regno);
3243 do_spill (gen_gr_spill, reg, cfa_off, reg);
3244 cfa_off -= 8;
3247 /* Spill the rest of the BR registers. */
3248 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3249 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3251 alt_regno = next_scratch_gr_reg ();
3252 alt_reg = gen_rtx_REG (DImode, alt_regno);
3253 reg = gen_rtx_REG (DImode, regno);
3254 emit_move_insn (alt_reg, reg);
3255 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3256 cfa_off -= 8;
3259 /* Align the frame and spill all FR registers. */
3260 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3261 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3263 gcc_assert (!(cfa_off & 15));
3264 reg = gen_rtx_REG (XFmode, regno);
3265 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3266 cfa_off -= 16;
3269 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3271 finish_spill_pointers ();
3274 /* Called after register allocation to add any instructions needed for the
3275 epilogue. Using an epilogue insn is favored compared to putting all of the
3276 instructions in output_function_epilogue(), since it allows the scheduler
3277 to intermix instructions with the restores of the call-saved registers. In
3278 some cases, it might be necessary to emit a barrier instruction as the last
3279 insn to prevent such scheduling. */
3281 void
3282 ia64_expand_epilogue (int sibcall_p)
3284 rtx insn, reg, alt_reg, ar_unat_save_reg;
3285 int regno, alt_regno, cfa_off;
3287 ia64_compute_frame_size (get_frame_size ());
3289 /* If there is a frame pointer, then we use it instead of the stack
3290 pointer, so that the stack pointer does not need to be valid when
3291 the epilogue starts. See EXIT_IGNORE_STACK. */
3292 if (frame_pointer_needed)
3293 setup_spill_pointers (current_frame_info.n_spilled,
3294 hard_frame_pointer_rtx, 0);
3295 else
3296 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3297 current_frame_info.total_size);
3299 if (current_frame_info.total_size != 0)
3301 /* ??? At this point we must generate a magic insn that appears to
3302 modify the spill iterators and the frame pointer. This would
3303 allow the most scheduling freedom. For now, just hard stop. */
3304 emit_insn (gen_blockage ());
3307 /* Locate the bottom of the register save area. */
3308 cfa_off = (current_frame_info.spill_cfa_off
3309 + current_frame_info.spill_size
3310 + current_frame_info.extra_spill_size);
3312 /* Restore the predicate registers. */
3313 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3315 if (current_frame_info.r[reg_save_pr] != 0)
3317 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3318 reg_emitted (reg_save_pr);
3320 else
3322 alt_regno = next_scratch_gr_reg ();
3323 alt_reg = gen_rtx_REG (DImode, alt_regno);
3324 do_restore (gen_movdi_x, alt_reg, cfa_off);
3325 cfa_off -= 8;
3327 reg = gen_rtx_REG (DImode, PR_REG (0));
3328 emit_move_insn (reg, alt_reg);
3331 /* Restore the application registers. */
3333 /* Load the saved unat from the stack, but do not restore it until
3334 after the GRs have been restored. */
3335 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3337 if (current_frame_info.r[reg_save_ar_unat] != 0)
3339 ar_unat_save_reg
3340 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3341 reg_emitted (reg_save_ar_unat);
3343 else
3345 alt_regno = next_scratch_gr_reg ();
3346 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3347 current_frame_info.gr_used_mask |= 1 << alt_regno;
3348 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3349 cfa_off -= 8;
3352 else
3353 ar_unat_save_reg = NULL_RTX;
3355 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3357 reg_emitted (reg_save_ar_pfs);
3358 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3359 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3360 emit_move_insn (reg, alt_reg);
3362 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3364 alt_regno = next_scratch_gr_reg ();
3365 alt_reg = gen_rtx_REG (DImode, alt_regno);
3366 do_restore (gen_movdi_x, alt_reg, cfa_off);
3367 cfa_off -= 8;
3368 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3369 emit_move_insn (reg, alt_reg);
3372 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3374 if (current_frame_info.r[reg_save_ar_lc] != 0)
3376 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3377 reg_emitted (reg_save_ar_lc);
3379 else
3381 alt_regno = next_scratch_gr_reg ();
3382 alt_reg = gen_rtx_REG (DImode, alt_regno);
3383 do_restore (gen_movdi_x, alt_reg, cfa_off);
3384 cfa_off -= 8;
3386 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3387 emit_move_insn (reg, alt_reg);
3390 /* Restore the return pointer. */
3391 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3393 if (current_frame_info.r[reg_save_b0] != 0)
3395 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3396 reg_emitted (reg_save_b0);
3398 else
3400 alt_regno = next_scratch_gr_reg ();
3401 alt_reg = gen_rtx_REG (DImode, alt_regno);
3402 do_restore (gen_movdi_x, alt_reg, cfa_off);
3403 cfa_off -= 8;
3405 reg = gen_rtx_REG (DImode, BR_REG (0));
3406 emit_move_insn (reg, alt_reg);
3409 /* We should now be at the base of the gr/br/fr spill area. */
3410 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3411 + current_frame_info.spill_size));
3413 /* The GP may be stored on the stack in the prologue, but it's
3414 never restored in the epilogue. Skip the stack slot. */
3415 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3416 cfa_off -= 8;
3418 /* Restore all general registers. */
3419 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3420 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3422 reg = gen_rtx_REG (DImode, regno);
3423 do_restore (gen_gr_restore, reg, cfa_off);
3424 cfa_off -= 8;
3427 /* Restore the branch registers. */
3428 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3429 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3431 alt_regno = next_scratch_gr_reg ();
3432 alt_reg = gen_rtx_REG (DImode, alt_regno);
3433 do_restore (gen_movdi_x, alt_reg, cfa_off);
3434 cfa_off -= 8;
3435 reg = gen_rtx_REG (DImode, regno);
3436 emit_move_insn (reg, alt_reg);
3439 /* Restore floating point registers. */
3440 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3441 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3443 gcc_assert (!(cfa_off & 15));
3444 reg = gen_rtx_REG (XFmode, regno);
3445 do_restore (gen_fr_restore_x, reg, cfa_off);
3446 cfa_off -= 16;
3449 /* Restore ar.unat for real. */
3450 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3452 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3453 emit_move_insn (reg, ar_unat_save_reg);
3456 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3458 finish_spill_pointers ();
3460 if (current_frame_info.total_size
3461 || cfun->machine->ia64_eh_epilogue_sp
3462 || frame_pointer_needed)
3464 /* ??? At this point we must generate a magic insn that appears to
3465 modify the spill iterators, the stack pointer, and the frame
3466 pointer. This would allow the most scheduling freedom. For now,
3467 just hard stop. */
3468 emit_insn (gen_blockage ());
3471 if (cfun->machine->ia64_eh_epilogue_sp)
3472 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3473 else if (frame_pointer_needed)
3475 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3476 RTX_FRAME_RELATED_P (insn) = 1;
3478 else if (current_frame_info.total_size)
3480 rtx offset, frame_size_rtx;
3482 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3483 if (satisfies_constraint_I (frame_size_rtx))
3484 offset = frame_size_rtx;
3485 else
3487 regno = next_scratch_gr_reg ();
3488 offset = gen_rtx_REG (DImode, regno);
3489 emit_move_insn (offset, frame_size_rtx);
3492 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3493 offset));
3495 RTX_FRAME_RELATED_P (insn) = 1;
3496 if (GET_CODE (offset) != CONST_INT)
3498 REG_NOTES (insn)
3499 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3500 gen_rtx_SET (VOIDmode,
3501 stack_pointer_rtx,
3502 gen_rtx_PLUS (DImode,
3503 stack_pointer_rtx,
3504 frame_size_rtx)),
3505 REG_NOTES (insn));
3509 if (cfun->machine->ia64_eh_epilogue_bsp)
3510 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3512 if (! sibcall_p)
3513 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3514 else
3516 int fp = GR_REG (2);
3517 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
3518 first available call-clobbered register.  If there was a frame_pointer
3519 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3520 so we have to make sure we're using the string "r2" when emitting
3521 the register name for the assembler. */
3522 if (current_frame_info.r[reg_fp]
3523 && current_frame_info.r[reg_fp] == GR_REG (2))
3524 fp = HARD_FRAME_POINTER_REGNUM;
3526 /* We must emit an alloc to force the input registers to become output
3527 registers. Otherwise, if the callee tries to pass its parameters
3528 through to another call without an intervening alloc, then these
3529 values get lost. */
3530 /* ??? We don't need to preserve all input registers. We only need to
3531 preserve those input registers used as arguments to the sibling call.
3532 It is unclear how to compute that number here. */
3533 if (current_frame_info.n_input_regs != 0)
3535 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3536 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3537 const0_rtx, const0_rtx,
3538 n_inputs, const0_rtx));
3539 RTX_FRAME_RELATED_P (insn) = 1;
3544 /* Return 1 if br.ret can do all the work required to return from a
3545 function. */
3548 ia64_direct_return (void)
3550 if (reload_completed && ! frame_pointer_needed)
3552 ia64_compute_frame_size (get_frame_size ());
3554 return (current_frame_info.total_size == 0
3555 && current_frame_info.n_spilled == 0
3556 && current_frame_info.r[reg_save_b0] == 0
3557 && current_frame_info.r[reg_save_pr] == 0
3558 && current_frame_info.r[reg_save_ar_pfs] == 0
3559 && current_frame_info.r[reg_save_ar_unat] == 0
3560 && current_frame_info.r[reg_save_ar_lc] == 0);
3562 return 0;
3565 /* Return the magic cookie that we use to hold the return address
3566 during early compilation. */
3569 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3571 if (count != 0)
3572 return NULL;
3573 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3576 /* Split this value after reload, now that we know where the return
3577 address is saved. */
3579 void
3580 ia64_split_return_addr_rtx (rtx dest)
3582 rtx src;
3584 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3586 if (current_frame_info.r[reg_save_b0] != 0)
3588 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3589 reg_emitted (reg_save_b0);
3591 else
3593 HOST_WIDE_INT off;
3594 unsigned int regno;
3595 rtx off_r;
3597 /* Compute offset from CFA for BR0. */
3598 /* ??? Must be kept in sync with ia64_expand_prologue. */
3599 off = (current_frame_info.spill_cfa_off
3600 + current_frame_info.spill_size);
3601 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3602 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3603 off -= 8;
3605 /* Convert CFA offset to a register based offset. */
3606 if (frame_pointer_needed)
3607 src = hard_frame_pointer_rtx;
3608 else
3610 src = stack_pointer_rtx;
3611 off += current_frame_info.total_size;
3614 /* Load address into scratch register. */
3615 off_r = GEN_INT (off);
3616 if (satisfies_constraint_I (off_r))
3617 emit_insn (gen_adddi3 (dest, src, off_r));
3618 else
3620 emit_move_insn (dest, off_r);
3621 emit_insn (gen_adddi3 (dest, src, dest));
3624 src = gen_rtx_MEM (Pmode, dest);
3627 else
3628 src = gen_rtx_REG (DImode, BR_REG (0));
3630 emit_move_insn (dest, src);
3634 ia64_hard_regno_rename_ok (int from, int to)
3636 /* Don't clobber any of the registers we reserved for the prologue. */
3637 enum ia64_frame_regs r;
3639 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3640 if (to == current_frame_info.r[r]
3641 || from == current_frame_info.r[r]
3642 || to == emitted_frame_related_regs[r]
3643 || from == emitted_frame_related_regs[r])
3644 return 0;
3646 /* Don't use output registers outside the register frame. */
3647 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3648 return 0;
3650 /* Retain even/oddness on predicate register pairs. */
3651 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3652 return (from & 1) == (to & 1);
3654 return 1;
3657 /* Target hook for assembling integer objects. Handle word-sized
3658 aligned objects and detect the cases when @fptr is needed. */
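/* Example of the output: a word-sized, aligned reference to a function
   symbol (say "foo", a name used here only for illustration) is emitted
   as "\tdata8\t@fptr(foo)", or data4 for 32-bit pointers, so that the
   linker resolves it to an official function descriptor rather than a
   raw code address.  */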
3660 static bool
3661 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3663 if (size == POINTER_SIZE / BITS_PER_UNIT
3664 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3665 && GET_CODE (x) == SYMBOL_REF
3666 && SYMBOL_REF_FUNCTION_P (x))
3668 static const char * const directive[2][2] = {
3669 /* 64-bit pointer */ /* 32-bit pointer */
3670 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3671 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3673 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3674 output_addr_const (asm_out_file, x);
3675 fputs (")\n", asm_out_file);
3676 return true;
3678 return default_assemble_integer (x, size, aligned_p);
3681 /* Emit the function prologue. */
3683 static void
3684 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3686 int mask, grsave, grsave_prev;
3688 if (current_frame_info.need_regstk)
3689 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3690 current_frame_info.n_input_regs,
3691 current_frame_info.n_local_regs,
3692 current_frame_info.n_output_regs,
3693 current_frame_info.n_rotate_regs);
3695 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3696 return;
3698 /* Emit the .prologue directive. */
3700 mask = 0;
3701 grsave = grsave_prev = 0;
3702 if (current_frame_info.r[reg_save_b0] != 0)
3704 mask |= 8;
3705 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3707 if (current_frame_info.r[reg_save_ar_pfs] != 0
3708 && (grsave_prev == 0
3709 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3711 mask |= 4;
3712 if (grsave_prev == 0)
3713 grsave = current_frame_info.r[reg_save_ar_pfs];
3714 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3716 if (current_frame_info.r[reg_fp] != 0
3717 && (grsave_prev == 0
3718 || current_frame_info.r[reg_fp] == grsave_prev + 1))
3720 mask |= 2;
3721 if (grsave_prev == 0)
3722 grsave = HARD_FRAME_POINTER_REGNUM;
3723 grsave_prev = current_frame_info.r[reg_fp];
3725 if (current_frame_info.r[reg_save_pr] != 0
3726 && (grsave_prev == 0
3727 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
3729 mask |= 1;
3730 if (grsave_prev == 0)
3731 grsave = current_frame_info.r[reg_save_pr];
3734 if (mask && TARGET_GNU_AS)
3735 fprintf (file, "\t.prologue %d, %d\n", mask,
3736 ia64_dbx_register_number (grsave));
3737 else
3738 fputs ("\t.prologue\n", file);
3740 /* Emit a .spill directive, if necessary, to relocate the base of
3741 the register spill area. */
3742 if (current_frame_info.spill_cfa_off != -16)
3743 fprintf (file, "\t.spill %ld\n",
3744 (long) (current_frame_info.spill_cfa_off
3745 + current_frame_info.spill_size));
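/* Worked illustration (added commentary, not from the original source):
   if b0 (the return pointer) is saved in some general register and
   ar.pfs in the next consecutive one, the mask computed above becomes
   8 | 4 == 12 and, with the GNU assembler, we emit

	.prologue 12, <dbx number of the first save register>

   Frame-pointer and predicate saves would add bits 2 and 1.  */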
3748 /* Emit the .body directive at the scheduled end of the prologue. */
3750 static void
3751 ia64_output_function_end_prologue (FILE *file)
3753 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3754 return;
3756 fputs ("\t.body\n", file);
3759 /* Emit the function epilogue. */
3761 static void
3762 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3763 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3765 int i;
3767 if (current_frame_info.r[reg_fp])
3769 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3770 reg_names[HARD_FRAME_POINTER_REGNUM]
3771 = reg_names[current_frame_info.r[reg_fp]];
3772 reg_names[current_frame_info.r[reg_fp]] = tmp;
3773 reg_emitted (reg_fp);
3775 if (! TARGET_REG_NAMES)
3777 for (i = 0; i < current_frame_info.n_input_regs; i++)
3778 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3779 for (i = 0; i < current_frame_info.n_local_regs; i++)
3780 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3781 for (i = 0; i < current_frame_info.n_output_regs; i++)
3782 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3785 current_frame_info.initialized = 0;
3789 ia64_dbx_register_number (int regno)
3791 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3792 from its home at loc79 to something inside the register frame. We
3793 must perform the same renumbering here for the debug info. */
3794 if (current_frame_info.r[reg_fp])
3796 if (regno == HARD_FRAME_POINTER_REGNUM)
3797 regno = current_frame_info.r[reg_fp];
3798 else if (regno == current_frame_info.r[reg_fp])
3799 regno = HARD_FRAME_POINTER_REGNUM;
3802 if (IN_REGNO_P (regno))
3803 return 32 + regno - IN_REG (0);
3804 else if (LOC_REGNO_P (regno))
3805 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3806 else if (OUT_REGNO_P (regno))
3807 return (32 + current_frame_info.n_input_regs
3808 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3809 else
3810 return regno;
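/* Worked example (added commentary, not from the original source):
   with 3 input and 4 local registers, loc2 maps to 32 + 3 + 2 == 37
   and out0 maps to 32 + 3 + 4 + 0 == 39, matching the packed layout of
   the register frame that the debug information describes.  */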
3813 void
3814 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3816 rtx addr_reg, eight = GEN_INT (8);
3818 /* The Intel assembler requires that the global __ia64_trampoline symbol
3819 be declared explicitly. */
3820 if (!TARGET_GNU_AS)
3822 static bool declared_ia64_trampoline = false;
3824 if (!declared_ia64_trampoline)
3826 declared_ia64_trampoline = true;
3827 (*targetm.asm_out.globalize_label) (asm_out_file,
3828 "__ia64_trampoline");
3832 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3833 addr = convert_memory_address (Pmode, addr);
3834 fnaddr = convert_memory_address (Pmode, fnaddr);
3835 static_chain = convert_memory_address (Pmode, static_chain);
3837 /* Load up our iterator. */
3838 addr_reg = gen_reg_rtx (Pmode);
3839 emit_move_insn (addr_reg, addr);
3841 /* The first two words are the fake descriptor:
3842 __ia64_trampoline, ADDR+16. */
3843 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3844 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3845 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3847 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3848 copy_to_reg (plus_constant (addr, 16)));
3849 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3851 /* The third word is the target descriptor. */
3852 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3853 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3855 /* The fourth word is the static chain. */
3856 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
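/* Resulting trampoline layout (added summary, not from the original
   source), with each word 8 bytes:

     ADDR +  0:  __ia64_trampoline   \  fake descriptor
     ADDR +  8:  ADDR + 16           /
     ADDR + 16:  fnaddr              (target descriptor)
     ADDR + 24:  static chain  */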
3859 /* Do any needed setup for a variadic function. CUM has not been updated
3860 for the last named argument which has type TYPE and mode MODE.
3862 We generate the actual spill instructions during prologue generation. */
3864 static void
3865 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3866 tree type, int * pretend_size,
3867 int second_time ATTRIBUTE_UNUSED)
3869 CUMULATIVE_ARGS next_cum = *cum;
3871 /* Skip the current argument. */
3872 ia64_function_arg_advance (&next_cum, mode, type, 1);
3874 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3876 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3877 *pretend_size = n * UNITS_PER_WORD;
3878 cfun->machine->n_varargs = n;
3882 /* Check whether TYPE is a homogeneous floating point aggregate. If
3883 it is, return the mode of the floating point type that appears
3884 in all leafs. If it is not, return VOIDmode.
3886 An aggregate is a homogeneous floating point aggregate if all
3887 fields/elements in it have the same floating point type (e.g.,
3888 SFmode). 128-bit quad-precision floats are excluded.
3890 Variable sized aggregates should never arrive here, since we should
3891 have already decided to pass them by reference. Top-level zero-sized
3892 aggregates are excluded because our parallels crash the middle-end. */
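/* Illustrative examples (added commentary, not from the original source):

     struct p2  { double x; double y; };    is an HFA, element mode DFmode
     struct p4  { float v[4]; };            is an HFA, element mode SFmode
     struct mix { double x; int tag; };     is not an HFA (returns VOIDmode)

   A field of 128-bit quad precision (TFmode) likewise disqualifies the
   aggregate, per the REAL_TYPE case below.  */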
3894 static enum machine_mode
3895 hfa_element_mode (const_tree type, bool nested)
3897 enum machine_mode element_mode = VOIDmode;
3898 enum machine_mode mode;
3899 enum tree_code code = TREE_CODE (type);
3900 int know_element_mode = 0;
3901 tree t;
3903 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3904 return VOIDmode;
3906 switch (code)
3908 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3909 case BOOLEAN_TYPE: case POINTER_TYPE:
3910 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3911 case LANG_TYPE: case FUNCTION_TYPE:
3912 return VOIDmode;
3914 /* Fortran complex types are supposed to be HFAs, so we need to handle
3915 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3916 types though. */
3917 case COMPLEX_TYPE:
3918 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3919 && TYPE_MODE (type) != TCmode)
3920 return GET_MODE_INNER (TYPE_MODE (type));
3921 else
3922 return VOIDmode;
3924 case REAL_TYPE:
3925 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3926 mode if this is contained within an aggregate. */
3927 if (nested && TYPE_MODE (type) != TFmode)
3928 return TYPE_MODE (type);
3929 else
3930 return VOIDmode;
3932 case ARRAY_TYPE:
3933 return hfa_element_mode (TREE_TYPE (type), 1);
3935 case RECORD_TYPE:
3936 case UNION_TYPE:
3937 case QUAL_UNION_TYPE:
3938 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3940 if (TREE_CODE (t) != FIELD_DECL)
3941 continue;
3943 mode = hfa_element_mode (TREE_TYPE (t), 1);
3944 if (know_element_mode)
3946 if (mode != element_mode)
3947 return VOIDmode;
3949 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3950 return VOIDmode;
3951 else
3953 know_element_mode = 1;
3954 element_mode = mode;
3957 return element_mode;
3959 default:
3960 /* If we reach here, we probably have some front-end specific type
3961 that the backend doesn't know about. This can happen via the
3962 aggregate_value_p call in init_function_start. All we can do is
3963 ignore unknown tree types. */
3964 return VOIDmode;
3967 return VOIDmode;
3970 /* Return the number of words required to hold a quantity of TYPE and MODE
3971 when passed as an argument. */
3972 static int
3973 ia64_function_arg_words (tree type, enum machine_mode mode)
3975 int words;
3977 if (mode == BLKmode)
3978 words = int_size_in_bytes (type);
3979 else
3980 words = GET_MODE_SIZE (mode);
3982 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
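/* Worked example (added commentary, not from the original source):
   with 8-byte words, a 12-byte BLKmode aggregate needs
   (12 + 8 - 1) / 8 == 2 argument slots.  */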
3985 /* Return the number of registers that should be skipped so the current
3986 argument (described by TYPE and WORDS) will be properly aligned.
3988 Integer and float arguments larger than 8 bytes start at the next
3989 even boundary. Aggregates larger than 8 bytes start at the next
3990 even boundary if the aggregate has 16 byte alignment. Note that
3991 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3992 but are still to be aligned in registers.
3994 ??? The ABI does not specify how to handle aggregates with
3995 alignment from 9 to 15 bytes, or greater than 16. We handle them
3996 all as if they had 16 byte alignment. Such aggregates can occur
3997 only if gcc extensions are used. */
3998 static int
3999 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
4001 if ((cum->words & 1) == 0)
4002 return 0;
4004 if (type
4005 && TREE_CODE (type) != INTEGER_TYPE
4006 && TREE_CODE (type) != REAL_TYPE)
4007 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4008 else
4009 return words > 1;
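/* Worked example (added commentary, not from the original source):
   if cum->words is odd and the next argument is a 16-byte-aligned
   aggregate (or a scalar wider than one word), one slot is skipped so
   the argument starts on an even slot; otherwise the offset is 0.  */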
4012 /* Return rtx for register where argument is passed, or zero if it is passed
4013 on the stack. */
4014 /* ??? 128-bit quad-precision floats are always passed in general
4015 registers. */
4018 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
4019 int named, int incoming)
4021 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4022 int words = ia64_function_arg_words (type, mode);
4023 int offset = ia64_function_arg_offset (cum, type, words);
4024 enum machine_mode hfa_mode = VOIDmode;
4026 /* If all argument slots are used, then it must go on the stack. */
4027 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4028 return 0;
4030 /* Check for and handle homogeneous FP aggregates. */
4031 if (type)
4032 hfa_mode = hfa_element_mode (type, 0);
4034 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4035 and unprototyped hfas are passed specially. */
4036 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4038 rtx loc[16];
4039 int i = 0;
4040 int fp_regs = cum->fp_regs;
4041 int int_regs = cum->words + offset;
4042 int hfa_size = GET_MODE_SIZE (hfa_mode);
4043 int byte_size;
4044 int args_byte_size;
4046 /* If prototyped, pass it in FR regs then GR regs.
4047 If not prototyped, pass it in both FR and GR regs.
4049 If this is an SFmode aggregate, then it is possible to run out of
4050 FR regs while GR regs are still left. In that case, we pass the
4051 remaining part in the GR regs. */
4053 /* Fill the FP regs. We do this always. We stop if we reach the end
4054 of the argument, the last FP register, or the last argument slot. */
4056 byte_size = ((mode == BLKmode)
4057 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4058 args_byte_size = int_regs * UNITS_PER_WORD;
4059 offset = 0;
4060 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4061 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4063 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4064 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4065 + fp_regs)),
4066 GEN_INT (offset));
4067 offset += hfa_size;
4068 args_byte_size += hfa_size;
4069 fp_regs++;
4072 /* If no prototype, then the whole thing must go in GR regs. */
4073 if (! cum->prototype)
4074 offset = 0;
4075 /* If this is an SFmode aggregate, then we might have some left over
4076 that needs to go in GR regs. */
4077 else if (byte_size != offset)
4078 int_regs += offset / UNITS_PER_WORD;
4080 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4082 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4084 enum machine_mode gr_mode = DImode;
4085 unsigned int gr_size;
4087 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4088 then this goes in a GR reg left adjusted/little endian, right
4089 adjusted/big endian. */
4090 /* ??? Currently this is handled wrong, because 4-byte hunks are
4091 always right adjusted/little endian. */
4092 if (offset & 0x4)
4093 gr_mode = SImode;
4094 /* If we have an even 4 byte hunk because the aggregate is a
4095 multiple of 4 bytes in size, then this goes in a GR reg right
4096 adjusted/little endian. */
4097 else if (byte_size - offset == 4)
4098 gr_mode = SImode;
4100 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4101 gen_rtx_REG (gr_mode, (basereg
4102 + int_regs)),
4103 GEN_INT (offset));
4105 gr_size = GET_MODE_SIZE (gr_mode);
4106 offset += gr_size;
4107 if (gr_size == UNITS_PER_WORD
4108 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4109 int_regs++;
4110 else if (gr_size > UNITS_PER_WORD)
4111 int_regs += gr_size / UNITS_PER_WORD;
4113 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4116 /* Integral and aggregates go in general registers. If we have run out of
4117 FR registers, then FP values must also go in general registers. This can
4118 happen when we have a SFmode HFA. */
4119 else if (mode == TFmode || mode == TCmode
4120 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4122 int byte_size = ((mode == BLKmode)
4123 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4124 if (BYTES_BIG_ENDIAN
4125 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4126 && byte_size < UNITS_PER_WORD
4127 && byte_size > 0)
4129 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4130 gen_rtx_REG (DImode,
4131 (basereg + cum->words
4132 + offset)),
4133 const0_rtx);
4134 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4136 else
4137 return gen_rtx_REG (mode, basereg + cum->words + offset);
4141 /* If there is a prototype, then FP values go in a FR register when
4142 named, and in a GR register when unnamed. */
4143 else if (cum->prototype)
4145 if (named)
4146 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4147 /* In big-endian mode, an anonymous SFmode value must be represented
4148 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4149 the value into the high half of the general register. */
4150 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4151 return gen_rtx_PARALLEL (mode,
4152 gen_rtvec (1,
4153 gen_rtx_EXPR_LIST (VOIDmode,
4154 gen_rtx_REG (DImode, basereg + cum->words + offset),
4155 const0_rtx)));
4156 else
4157 return gen_rtx_REG (mode, basereg + cum->words + offset);
4159 /* If there is no prototype, then FP values go in both FR and GR
4160 registers. */
4161 else
4163 /* See comment above. */
4164 enum machine_mode inner_mode =
4165 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4167 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4168 gen_rtx_REG (mode, (FR_ARG_FIRST
4169 + cum->fp_regs)),
4170 const0_rtx);
4171 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4172 gen_rtx_REG (inner_mode,
4173 (basereg + cum->words
4174 + offset)),
4175 const0_rtx);
4177 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
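/* Worked illustration (added commentary, not from the original source):
   a named, prototyped argument of type

     struct p3 { double a, b, c; };

   is an HFA with DFmode elements, so the PARALLEL built above places
   the three doubles in consecutive FP argument registers starting at
   FR_ARG_FIRST, at byte offsets 0, 8 and 16, while the aggregate still
   occupies the usual argument slots for later arguments.  */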
4181 /* Return number of bytes, at the beginning of the argument, that must be
4182 put in registers. 0 if the argument is entirely in registers or entirely
4183 in memory. */
4185 static int
4186 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4187 tree type, bool named ATTRIBUTE_UNUSED)
4189 int words = ia64_function_arg_words (type, mode);
4190 int offset = ia64_function_arg_offset (cum, type, words);
4192 /* If all argument slots are used, then it must go on the stack. */
4193 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4194 return 0;
4196 /* It doesn't matter whether the argument goes in FR or GR regs. If
4197 it fits within the 8 argument slots, then it goes entirely in
4198 registers. If it extends past the last argument slot, then the rest
4199 goes on the stack. */
4201 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4202 return 0;
4204 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4207 /* Update CUM to point after this argument. This is patterned after
4208 ia64_function_arg. */
4210 void
4211 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4212 tree type, int named)
4214 int words = ia64_function_arg_words (type, mode);
4215 int offset = ia64_function_arg_offset (cum, type, words);
4216 enum machine_mode hfa_mode = VOIDmode;
4218 /* If all arg slots are already full, then there is nothing to do. */
4219 if (cum->words >= MAX_ARGUMENT_SLOTS)
4220 return;
4222 cum->words += words + offset;
4224 /* Check for and handle homogeneous FP aggregates. */
4225 if (type)
4226 hfa_mode = hfa_element_mode (type, 0);
4228 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4229 and unprototyped hfas are passed specially. */
4230 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4232 int fp_regs = cum->fp_regs;
4233 /* This is the original value of cum->words + offset. */
4234 int int_regs = cum->words - words;
4235 int hfa_size = GET_MODE_SIZE (hfa_mode);
4236 int byte_size;
4237 int args_byte_size;
4239 /* If prototyped, pass it in FR regs then GR regs.
4240 If not prototyped, pass it in both FR and GR regs.
4242 If this is an SFmode aggregate, then it is possible to run out of
4243 FR regs while GR regs are still left. In that case, we pass the
4244 remaining part in the GR regs. */
4246 /* Fill the FP regs. We do this always. We stop if we reach the end
4247 of the argument, the last FP register, or the last argument slot. */
4249 byte_size = ((mode == BLKmode)
4250 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4251 args_byte_size = int_regs * UNITS_PER_WORD;
4252 offset = 0;
4253 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4254 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4256 offset += hfa_size;
4257 args_byte_size += hfa_size;
4258 fp_regs++;
4261 cum->fp_regs = fp_regs;
4264 /* Integral and aggregates go in general registers. So do TFmode FP values.
4265 If we have run out of FR registers, then other FP values must also go in
4266 general registers. This can happen when we have a SFmode HFA. */
4267 else if (mode == TFmode || mode == TCmode
4268 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4269 cum->int_regs = cum->words;
4271 /* If there is a prototype, then FP values go in a FR register when
4272 named, and in a GR register when unnamed. */
4273 else if (cum->prototype)
4275 if (! named)
4276 cum->int_regs = cum->words;
4277 else
4278 /* ??? Complex types should not reach here. */
4279 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4281 /* If there is no prototype, then FP values go in both FR and GR
4282 registers. */
4283 else
4285 /* ??? Complex types should not reach here. */
4286 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4287 cum->int_regs = cum->words;
4291 /* Arguments with alignment larger than 8 bytes start at the next even
4292 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4293 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4296 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4299 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4300 return PARM_BOUNDARY * 2;
4302 if (type)
4304 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4305 return PARM_BOUNDARY * 2;
4306 else
4307 return PARM_BOUNDARY;
4310 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4311 return PARM_BOUNDARY * 2;
4312 else
4313 return PARM_BOUNDARY;
4316 /* True if it is OK to do sibling call optimization for the specified
4317 call expression EXP. DECL will be the called function, or NULL if
4318 this is an indirect call. */
4319 static bool
4320 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4322 /* We can't perform a sibcall if the current function has the syscall_linkage
4323 attribute. */
4324 if (lookup_attribute ("syscall_linkage",
4325 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4326 return false;
4328 /* We must always return with our current GP. This means we can
4329 only sibcall to functions defined in the current module. */
4330 return decl && (*targetm.binds_local_p) (decl);
4334 /* Implement va_arg. */
4336 static tree
4337 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4339 /* Variable sized types are passed by reference. */
4340 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4342 tree ptrtype = build_pointer_type (type);
4343 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4344 return build_va_arg_indirect_ref (addr);
4347 /* Aggregate arguments with alignment larger than 8 bytes start at
4348 the next even boundary. Integer and floating point arguments
4349 do so if they are larger than 8 bytes, whether or not they are
4350 also aligned larger than 8 bytes. */
4351 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4352 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4354 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4355 size_int (2 * UNITS_PER_WORD - 1));
4356 t = fold_convert (sizetype, t);
4357 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4358 size_int (-2 * UNITS_PER_WORD));
4359 t = fold_convert (TREE_TYPE (valist), t);
4360 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist, t);
4361 gimplify_and_add (t, pre_p);
4364 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
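/* Added note (not from the original source): with 8-byte words the
   alignment code above effectively computes

     valist = (valist + 15) & -16;

   before the standard va_arg expansion takes over.  */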
4367 /* Return 1 if the function return value is returned in memory. Return 0 if it is
4368 in a register. */
4370 static bool
4371 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4373 enum machine_mode mode;
4374 enum machine_mode hfa_mode;
4375 HOST_WIDE_INT byte_size;
4377 mode = TYPE_MODE (valtype);
4378 byte_size = GET_MODE_SIZE (mode);
4379 if (mode == BLKmode)
4381 byte_size = int_size_in_bytes (valtype);
4382 if (byte_size < 0)
4383 return true;
4386 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4388 hfa_mode = hfa_element_mode (valtype, 0);
4389 if (hfa_mode != VOIDmode)
4391 int hfa_size = GET_MODE_SIZE (hfa_mode);
4393 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4394 return true;
4395 else
4396 return false;
4398 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4399 return true;
4400 else
4401 return false;
4404 /* Return rtx for register that holds the function return value. */
4407 ia64_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
4409 enum machine_mode mode;
4410 enum machine_mode hfa_mode;
4412 mode = TYPE_MODE (valtype);
4413 hfa_mode = hfa_element_mode (valtype, 0);
4415 if (hfa_mode != VOIDmode)
4417 rtx loc[8];
4418 int i;
4419 int hfa_size;
4420 int byte_size;
4421 int offset;
4423 hfa_size = GET_MODE_SIZE (hfa_mode);
4424 byte_size = ((mode == BLKmode)
4425 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4426 offset = 0;
4427 for (i = 0; offset < byte_size; i++)
4429 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4430 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4431 GEN_INT (offset));
4432 offset += hfa_size;
4434 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4436 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4437 return gen_rtx_REG (mode, FR_ARG_FIRST);
4438 else
4440 bool need_parallel = false;
4442 /* In big-endian mode, we need to manage the layout of aggregates
4443 in the registers so that we get the bits properly aligned in
4444 the highpart of the registers. */
4445 if (BYTES_BIG_ENDIAN
4446 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4447 need_parallel = true;
4449 /* Something like struct S { long double x; char a[0]; } is not an
4450 HFA structure, and therefore doesn't go in fp registers. But
4451 the middle-end will give it XFmode anyway, and XFmode values
4452 don't normally fit in integer registers. So we need to smuggle
4453 the value inside a parallel. */
4454 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4455 need_parallel = true;
4457 if (need_parallel)
4459 rtx loc[8];
4460 int offset;
4461 int bytesize;
4462 int i;
4464 offset = 0;
4465 bytesize = int_size_in_bytes (valtype);
4466 /* An empty PARALLEL is invalid here, but the return value
4467 doesn't matter for empty structs. */
4468 if (bytesize == 0)
4469 return gen_rtx_REG (mode, GR_RET_FIRST);
4470 for (i = 0; offset < bytesize; i++)
4472 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4473 gen_rtx_REG (DImode,
4474 GR_RET_FIRST + i),
4475 GEN_INT (offset));
4476 offset += UNITS_PER_WORD;
4478 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4481 return gen_rtx_REG (mode, GR_RET_FIRST);
4485 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4486 We need to emit DTP-relative relocations. */
4488 static void
4489 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4491 gcc_assert (size == 4 || size == 8);
4492 if (size == 4)
4493 fputs ("\tdata4.ua\t@dtprel(", file);
4494 else
4495 fputs ("\tdata8.ua\t@dtprel(", file);
4496 output_addr_const (file, x);
4497 fputs (")", file);
4500 /* Print a memory address as an operand to reference that memory location. */
4502 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4503 also call this from ia64_print_operand for memory addresses. */
4505 void
4506 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4507 rtx address ATTRIBUTE_UNUSED)
4511 /* Print an operand to an assembler instruction.
4512 C Swap and print a comparison operator.
4513 D Print an FP comparison operator.
4514 E Print 32 - constant, for SImode shifts as extract.
4515 e Print 64 - constant, for DImode rotates.
4516 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4517 a floating point register emitted normally.
4518 I Invert a predicate register by adding 1.
4519 J Select the proper predicate register for a condition.
4520 j Select the inverse predicate register for a condition.
4521 O Append .acq for volatile load.
4522 P Postincrement of a MEM.
4523 Q Append .rel for volatile store.
4524 R Print .s .d or nothing for a single, double or no truncation.
4525 S Shift amount for shladd instruction.
4526 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4527 for Intel assembler.
4528 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4529 for Intel assembler.
4530 X A pair of floating point registers.
4531 r Print register name, or constant 0 as r0. HP compatibility for
4532 Linux kernel.
4533 v Print vector constant value as an 8-byte integer value. */
4535 void
4536 ia64_print_operand (FILE * file, rtx x, int code)
4538 const char *str;
4540 switch (code)
4542 case 0:
4543 /* Handled below. */
4544 break;
4546 case 'C':
4548 enum rtx_code c = swap_condition (GET_CODE (x));
4549 fputs (GET_RTX_NAME (c), file);
4550 return;
4553 case 'D':
4554 switch (GET_CODE (x))
4556 case NE:
4557 str = "neq";
4558 break;
4559 case UNORDERED:
4560 str = "unord";
4561 break;
4562 case ORDERED:
4563 str = "ord";
4564 break;
4565 case UNLT:
4566 str = "nge";
4567 break;
4568 case UNLE:
4569 str = "ngt";
4570 break;
4571 case UNGT:
4572 str = "nle";
4573 break;
4574 case UNGE:
4575 str = "nlt";
4576 break;
4577 default:
4578 str = GET_RTX_NAME (GET_CODE (x));
4579 break;
4581 fputs (str, file);
4582 return;
4584 case 'E':
4585 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4586 return;
4588 case 'e':
4589 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4590 return;
4592 case 'F':
4593 if (x == CONST0_RTX (GET_MODE (x)))
4594 str = reg_names [FR_REG (0)];
4595 else if (x == CONST1_RTX (GET_MODE (x)))
4596 str = reg_names [FR_REG (1)];
4597 else
4599 gcc_assert (GET_CODE (x) == REG);
4600 str = reg_names [REGNO (x)];
4602 fputs (str, file);
4603 return;
4605 case 'I':
4606 fputs (reg_names [REGNO (x) + 1], file);
4607 return;
4609 case 'J':
4610 case 'j':
4612 unsigned int regno = REGNO (XEXP (x, 0));
4613 if (GET_CODE (x) == EQ)
4614 regno += 1;
4615 if (code == 'j')
4616 regno ^= 1;
4617 fputs (reg_names [regno], file);
4619 return;
4621 case 'O':
4622 if (MEM_VOLATILE_P (x))
4623 fputs(".acq", file);
4624 return;
4626 case 'P':
4628 HOST_WIDE_INT value;
4630 switch (GET_CODE (XEXP (x, 0)))
4632 default:
4633 return;
4635 case POST_MODIFY:
4636 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4637 if (GET_CODE (x) == CONST_INT)
4638 value = INTVAL (x);
4639 else
4641 gcc_assert (GET_CODE (x) == REG);
4642 fprintf (file, ", %s", reg_names[REGNO (x)]);
4643 return;
4645 break;
4647 case POST_INC:
4648 value = GET_MODE_SIZE (GET_MODE (x));
4649 break;
4651 case POST_DEC:
4652 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4653 break;
4656 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4657 return;
4660 case 'Q':
4661 if (MEM_VOLATILE_P (x))
4662 fputs(".rel", file);
4663 return;
4665 case 'R':
4666 if (x == CONST0_RTX (GET_MODE (x)))
4667 fputs(".s", file);
4668 else if (x == CONST1_RTX (GET_MODE (x)))
4669 fputs(".d", file);
4670 else if (x == CONST2_RTX (GET_MODE (x)))
4672 else
4673 output_operand_lossage ("invalid %%R value");
4674 return;
4676 case 'S':
4677 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4678 return;
4680 case 'T':
4681 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4683 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4684 return;
4686 break;
4688 case 'U':
4689 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4691 const char *prefix = "0x";
4692 if (INTVAL (x) & 0x80000000)
4694 fprintf (file, "0xffffffff");
4695 prefix = "";
4697 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4698 return;
4700 break;
4702 case 'X':
4704 unsigned int regno = REGNO (x);
4705 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4707 return;
4709 case 'r':
4710 /* If this operand is the constant zero, write it as register zero.
4711 Any register, zero, or CONST_INT value is OK here. */
4712 if (GET_CODE (x) == REG)
4713 fputs (reg_names[REGNO (x)], file);
4714 else if (x == CONST0_RTX (GET_MODE (x)))
4715 fputs ("r0", file);
4716 else if (GET_CODE (x) == CONST_INT)
4717 output_addr_const (file, x);
4718 else
4719 output_operand_lossage ("invalid %%r value");
4720 return;
4722 case 'v':
4723 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4724 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4725 break;
4727 case '+':
4729 const char *which;
4731 /* For conditional branches, returns or calls, substitute
4732 sptk, dptk, dpnt, or spnt for %s. */
4733 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4734 if (x)
4736 int pred_val = INTVAL (XEXP (x, 0));
4738 /* Guess top and bottom 10% statically predicted. */
4739 if (pred_val < REG_BR_PROB_BASE / 50
4740 && br_prob_note_reliable_p (x))
4741 which = ".spnt";
4742 else if (pred_val < REG_BR_PROB_BASE / 2)
4743 which = ".dpnt";
4744 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4745 || !br_prob_note_reliable_p (x))
4746 which = ".dptk";
4747 else
4748 which = ".sptk";
4750 else if (GET_CODE (current_output_insn) == CALL_INSN)
4751 which = ".sptk";
4752 else
4753 which = ".dptk";
4755 fputs (which, file);
4756 return;
4759 case ',':
4760 x = current_insn_predicate;
4761 if (x)
4763 unsigned int regno = REGNO (XEXP (x, 0));
4764 if (GET_CODE (x) == EQ)
4765 regno += 1;
4766 fprintf (file, "(%s) ", reg_names [regno]);
4768 return;
4770 default:
4771 output_operand_lossage ("ia64_print_operand: unknown code");
4772 return;
4775 switch (GET_CODE (x))
4777 /* This happens for the spill/restore instructions. */
4778 case POST_INC:
4779 case POST_DEC:
4780 case POST_MODIFY:
4781 x = XEXP (x, 0);
4782 /* ... fall through ... */
4784 case REG:
4785 fputs (reg_names [REGNO (x)], file);
4786 break;
4788 case MEM:
4790 rtx addr = XEXP (x, 0);
4791 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4792 addr = XEXP (addr, 0);
4793 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4794 break;
4797 default:
4798 output_addr_const (file, x);
4799 break;
4802 return;
4805 /* Compute a (partial) cost for rtx X. Return true if the complete
4806 cost has been computed, and false if subexpressions should be
4807 scanned. In either case, *TOTAL contains the cost result. */
4808 /* ??? This is incomplete. */
4810 static bool
4811 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4813 switch (code)
4815 case CONST_INT:
4816 switch (outer_code)
4818 case SET:
4819 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
4820 return true;
4821 case PLUS:
4822 if (satisfies_constraint_I (x))
4823 *total = 0;
4824 else if (satisfies_constraint_J (x))
4825 *total = 1;
4826 else
4827 *total = COSTS_N_INSNS (1);
4828 return true;
4829 default:
4830 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
4831 *total = 0;
4832 else
4833 *total = COSTS_N_INSNS (1);
4834 return true;
4837 case CONST_DOUBLE:
4838 *total = COSTS_N_INSNS (1);
4839 return true;
4841 case CONST:
4842 case SYMBOL_REF:
4843 case LABEL_REF:
4844 *total = COSTS_N_INSNS (3);
4845 return true;
4847 case MULT:
4848 /* For multiplies wider than HImode, we have to go to the FPU,
4849 which normally involves copies. Plus there's the latency
4850 of the multiply itself, and the latency of the instructions to
4851 transfer integer regs to FP regs. */
4852 /* ??? Check for FP mode. */
4853 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4854 *total = COSTS_N_INSNS (10);
4855 else
4856 *total = COSTS_N_INSNS (2);
4857 return true;
4859 case PLUS:
4860 case MINUS:
4861 case ASHIFT:
4862 case ASHIFTRT:
4863 case LSHIFTRT:
4864 *total = COSTS_N_INSNS (1);
4865 return true;
4867 case DIV:
4868 case UDIV:
4869 case MOD:
4870 case UMOD:
4871 /* We make divide expensive, so that divide-by-constant will be
4872 optimized to a multiply. */
4873 *total = COSTS_N_INSNS (60);
4874 return true;
4876 default:
4877 return false;
4881 /* Calculate the cost of moving data from a register in class FROM to
4882 one in class TO, using MODE. */
4885 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4886 enum reg_class to)
4888 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4889 if (to == ADDL_REGS)
4890 to = GR_REGS;
4891 if (from == ADDL_REGS)
4892 from = GR_REGS;
4894 /* All costs are symmetric, so reduce cases by putting the
4895 lower number class as the destination. */
4896 if (from < to)
4898 enum reg_class tmp = to;
4899 to = from, from = tmp;
4902 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4903 so that we get secondary memory reloads. Between FR_REGS,
4904 we have to make this at least as expensive as MEMORY_MOVE_COST
4905 to avoid spectacularly poor register class preferencing. */
4906 if (mode == XFmode || mode == RFmode)
4908 if (to != GR_REGS || from != GR_REGS)
4909 return MEMORY_MOVE_COST (mode, to, 0);
4910 else
4911 return 3;
4914 switch (to)
4916 case PR_REGS:
4917 /* Moving between PR registers takes two insns. */
4918 if (from == PR_REGS)
4919 return 3;
4920 /* Moving between PR and anything but GR is impossible. */
4921 if (from != GR_REGS)
4922 return MEMORY_MOVE_COST (mode, to, 0);
4923 break;
4925 case BR_REGS:
4926 /* Moving between BR and anything but GR is impossible. */
4927 if (from != GR_REGS && from != GR_AND_BR_REGS)
4928 return MEMORY_MOVE_COST (mode, to, 0);
4929 break;
4931 case AR_I_REGS:
4932 case AR_M_REGS:
4933 /* Moving between AR and anything but GR is impossible. */
4934 if (from != GR_REGS)
4935 return MEMORY_MOVE_COST (mode, to, 0);
4936 break;
4938 case GR_REGS:
4939 case FR_REGS:
4940 case FP_REGS:
4941 case GR_AND_FR_REGS:
4942 case GR_AND_BR_REGS:
4943 case ALL_REGS:
4944 break;
4946 default:
4947 gcc_unreachable ();
4950 return 2;
4953 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4954 to use when copying X into that class. */
4956 enum reg_class
4957 ia64_preferred_reload_class (rtx x, enum reg_class class)
4959 switch (class)
4961 case FR_REGS:
4962 case FP_REGS:
4963 /* Don't allow volatile mem reloads into floating point registers.
4964 This is defined to force reload to choose the r/m case instead
4965 of the f/f case when reloading (set (reg fX) (mem/v)). */
4966 if (MEM_P (x) && MEM_VOLATILE_P (x))
4967 return NO_REGS;
4969 /* Force all unrecognized constants into the constant pool. */
4970 if (CONSTANT_P (x))
4971 return NO_REGS;
4972 break;
4974 case AR_M_REGS:
4975 case AR_I_REGS:
4976 if (!OBJECT_P (x))
4977 return NO_REGS;
4978 break;
4980 default:
4981 break;
4984 return class;
4987 /* This function returns the register class required for a secondary
4988 register when copying between one of the registers in CLASS, and X,
4989 using MODE. A return value of NO_REGS means that no secondary register
4990 is required. */
4992 enum reg_class
4993 ia64_secondary_reload_class (enum reg_class class,
4994 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4996 int regno = -1;
4998 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4999 regno = true_regnum (x);
5001 switch (class)
5003 case BR_REGS:
5004 case AR_M_REGS:
5005 case AR_I_REGS:
5006 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5007 interaction. We end up with two pseudos with overlapping lifetimes
5008 both of which are equiv to the same constant, and both of which need
5009 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5010 changes depending on the path length, which means the qty_first_reg
5011 check in make_regs_eqv can give different answers at different times.
5012 At some point I'll probably need a reload_indi pattern to handle
5013 this.
5015 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5016 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5017 non-general registers for good measure. */
5018 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5019 return GR_REGS;
5021 /* This is needed if a pseudo used as a call_operand gets spilled to a
5022 stack slot. */
5023 if (GET_CODE (x) == MEM)
5024 return GR_REGS;
5025 break;
5027 case FR_REGS:
5028 case FP_REGS:
5029 /* Need to go through general registers to get to other class regs. */
5030 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5031 return GR_REGS;
5033 /* This can happen when a paradoxical subreg is an operand to the
5034 muldi3 pattern. */
5035 /* ??? This shouldn't be necessary after instruction scheduling is
5036 enabled, because paradoxical subregs are not accepted by
5037 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5038 stop the paradoxical subreg stupidity in the *_operand functions
5039 in recog.c. */
5040 if (GET_CODE (x) == MEM
5041 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5042 || GET_MODE (x) == QImode))
5043 return GR_REGS;
5045 /* This can happen because of the ior/and/etc patterns that accept FP
5046 registers as operands. If the third operand is a constant, then it
5047 needs to be reloaded into a FP register. */
5048 if (GET_CODE (x) == CONST_INT)
5049 return GR_REGS;
5051 /* This can happen because of register elimination in a muldi3 insn.
5052 E.g. `26107 * (unsigned long)&u'. */
5053 if (GET_CODE (x) == PLUS)
5054 return GR_REGS;
5055 break;
5057 case PR_REGS:
5058 /* ??? This happens if we cse/gcse a BImode value across a call,
5059 and the function has a nonlocal goto. This is because global
5060 does not allocate call crossing pseudos to hard registers when
5061 current_function_has_nonlocal_goto is true. This is relatively
5062 common for C++ programs that use exceptions. To reproduce,
5063 return NO_REGS and compile libstdc++. */
5064 if (GET_CODE (x) == MEM)
5065 return GR_REGS;
5067 /* This can happen when we take a BImode subreg of a DImode value,
5068 and that DImode value winds up in some non-GR register. */
5069 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5070 return GR_REGS;
5071 break;
5073 default:
5074 break;
5077 return NO_REGS;
5081 /* Implement targetm.unspec_may_trap_p hook. */
5082 static int
5083 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5085 if (GET_CODE (x) == UNSPEC)
5087 switch (XINT (x, 1))
5089 case UNSPEC_LDA:
5090 case UNSPEC_LDS:
5091 case UNSPEC_LDSA:
5092 case UNSPEC_LDCCLR:
5093 case UNSPEC_CHKACLR:
5094 case UNSPEC_CHKS:
5095 /* These unspecs are just wrappers. */
5096 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5100 return default_unspec_may_trap_p (x, flags);
5104 /* Parse the -mfixed-range= option string. */
5106 static void
5107 fix_range (const char *const_str)
5109 int i, first, last;
5110 char *str, *dash, *comma;
5112 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5113 REG2 are either register names or register numbers. The effect
5114 of this option is to mark the registers in the range from REG1 to
5115 REG2 as ``fixed'' so they won't be used by the compiler. This is
5116 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5118 i = strlen (const_str);
5119 str = (char *) alloca (i + 1);
5120 memcpy (str, const_str, i + 1);
5122 while (1)
5124 dash = strchr (str, '-');
5125 if (!dash)
5127 warning (0, "value of -mfixed-range must have form REG1-REG2");
5128 return;
5130 *dash = '\0';
5132 comma = strchr (dash + 1, ',');
5133 if (comma)
5134 *comma = '\0';
5136 first = decode_reg_name (str);
5137 if (first < 0)
5139 warning (0, "unknown register name: %s", str);
5140 return;
5143 last = decode_reg_name (dash + 1);
5144 if (last < 0)
5146 warning (0, "unknown register name: %s", dash + 1);
5147 return;
5150 *dash = '-';
5152 if (first > last)
5154 warning (0, "%s-%s is an empty range", str, dash + 1);
5155 return;
5158 for (i = first; i <= last; ++i)
5159 fixed_regs[i] = call_used_regs[i] = 1;
5161 if (!comma)
5162 break;
5164 *comma = ',';
5165 str = comma + 1;
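/* Usage example (added commentary, not from the original source):
   -mfixed-range=f32-f127 marks f32 through f127 as fixed so that
   compiler-generated code never touches them; several such ranges can
   be given, separated by commas.  */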
5169 /* Implement TARGET_HANDLE_OPTION. */
5171 static bool
5172 ia64_handle_option (size_t code, const char *arg, int value)
5174 switch (code)
5176 case OPT_mfixed_range_:
5177 fix_range (arg);
5178 return true;
5180 case OPT_mtls_size_:
5181 if (value != 14 && value != 22 && value != 64)
5182 error ("bad value %<%s%> for -mtls-size= switch", arg);
5183 return true;
5185 case OPT_mtune_:
5187 static struct pta
5189 const char *name; /* processor name or nickname. */
5190 enum processor_type processor;
5192 const processor_alias_table[] =
5194 {"itanium", PROCESSOR_ITANIUM},
5195 {"itanium1", PROCESSOR_ITANIUM},
5196 {"merced", PROCESSOR_ITANIUM},
5197 {"itanium2", PROCESSOR_ITANIUM2},
5198 {"mckinley", PROCESSOR_ITANIUM2},
5200 int const pta_size = ARRAY_SIZE (processor_alias_table);
5201 int i;
5203 for (i = 0; i < pta_size; i++)
5204 if (!strcmp (arg, processor_alias_table[i].name))
5206 ia64_tune = processor_alias_table[i].processor;
5207 break;
5209 if (i == pta_size)
5210 error ("bad value %<%s%> for -mtune= switch", arg);
5211 return true;
5214 default:
5215 return true;
5219 /* Implement OVERRIDE_OPTIONS. */
5221 void
5222 ia64_override_options (void)
5224 if (TARGET_AUTO_PIC)
5225 target_flags |= MASK_CONST_GP;
5227 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5229 warning (0, "not yet implemented: latency-optimized inline square root");
5230 TARGET_INLINE_SQRT = INL_MAX_THR;
5233 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5234 flag_schedule_insns_after_reload = 0;
5236 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5238 init_machine_status = ia64_init_machine_status;
5241 /* Initialize the record of emitted frame related registers. */
5243 void ia64_init_expanders (void)
5245 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5248 static struct machine_function *
5249 ia64_init_machine_status (void)
5251 return ggc_alloc_cleared (sizeof (struct machine_function));
5254 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5255 static enum attr_type ia64_safe_type (rtx);
5257 static enum attr_itanium_class
5258 ia64_safe_itanium_class (rtx insn)
5260 if (recog_memoized (insn) >= 0)
5261 return get_attr_itanium_class (insn);
5262 else
5263 return ITANIUM_CLASS_UNKNOWN;
5266 static enum attr_type
5267 ia64_safe_type (rtx insn)
5269 if (recog_memoized (insn) >= 0)
5270 return get_attr_type (insn);
5271 else
5272 return TYPE_UNKNOWN;
5275 /* The following collection of routines emit instruction group stop bits as
5276 necessary to avoid dependencies. */
5278 /* Need to track some additional registers as far as serialization is
5279 concerned so we can properly handle br.call and br.ret. We could
5280 make these registers visible to gcc, but since these registers are
5281 never explicitly used in gcc generated code, it seems wasteful to
5282 do so (plus it would make the call and return patterns needlessly
5283 complex). */
5284 #define REG_RP (BR_REG (0))
5285 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5286 /* This is used for volatile asms which may require a stop bit immediately
5287 before and after them. */
5288 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5289 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5290 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5292 /* For each register, we keep track of how it has been written in the
5293 current instruction group.
5295 If a register is written unconditionally (no qualifying predicate),
5296 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5298 If a register is written if its qualifying predicate P is true, we
5299 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5300 may be written again by the complement of P (P^1) and when this happens,
5301 WRITE_COUNT gets set to 2.
5303 The result of this is that whenever an insn attempts to write a register
5304 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5306 If a predicate register is written by a floating-point insn, we set
5307 WRITTEN_BY_FP to true.
5309 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5310 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
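/* Walkthrough (added illustration, not from the original source; the
   register names are arbitrary): for

     (p6) mov r14 = 1
     (p7) mov r14 = 2

   with p6/p7 a complementary pair, the first write sets WRITE_COUNT to
   1 with FIRST_PRED == p6, and the second bumps it to 2 without needing
   a barrier; any further write of r14 in the same group would then
   force an insn group stop.  */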
5312 #if GCC_VERSION >= 4000
5313 #define RWS_FIELD_TYPE __extension__ unsigned short
5314 #else
5315 #define RWS_FIELD_TYPE unsigned int
5316 #endif
5317 struct reg_write_state
5319 RWS_FIELD_TYPE write_count : 2;
5320 RWS_FIELD_TYPE first_pred : 10;
5321 RWS_FIELD_TYPE written_by_fp : 1;
5322 RWS_FIELD_TYPE written_by_and : 1;
5323 RWS_FIELD_TYPE written_by_or : 1;
5326 /* Cumulative info for the current instruction group. */
5327 struct reg_write_state rws_sum[NUM_REGS];
5328 #ifdef ENABLE_CHECKING
5329 /* Bitmap whether a register has been written in the current insn. */
5330 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5331 / HOST_BITS_PER_WIDEST_FAST_INT];
5333 static inline void
5334 rws_insn_set (int regno)
5336 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5337 SET_HARD_REG_BIT (rws_insn, regno);
5340 static inline int
5341 rws_insn_test (int regno)
5343 return TEST_HARD_REG_BIT (rws_insn, regno);
5345 #else
5346 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5347 unsigned char rws_insn[2];
5349 static inline void
5350 rws_insn_set (int regno)
5352 if (regno == REG_AR_CFM)
5353 rws_insn[0] = 1;
5354 else if (regno == REG_VOLATILE)
5355 rws_insn[1] = 1;
5358 static inline int
5359 rws_insn_test (int regno)
5361 if (regno == REG_AR_CFM)
5362 return rws_insn[0];
5363 if (regno == REG_VOLATILE)
5364 return rws_insn[1];
5365 return 0;
5367 #endif
5369 /* Indicates whether this is the first instruction after a stop bit,
5370 in which case we don't need another stop bit. Without this,
5371 ia64_variable_issue will die when scheduling an alloc. */
5372 static int first_instruction;
5374 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5375 RTL for one instruction. */
5376 struct reg_flags
5378 unsigned int is_write : 1; /* Is register being written? */
5379 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5380 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5381 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5382 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5383 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5386 static void rws_update (int, struct reg_flags, int);
5387 static int rws_access_regno (int, struct reg_flags, int);
5388 static int rws_access_reg (rtx, struct reg_flags, int);
5389 static void update_set_flags (rtx, struct reg_flags *);
5390 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5391 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5392 static void init_insn_group_barriers (void);
5393 static int group_barrier_needed (rtx);
5394 static int safe_group_barrier_needed (rtx);
5395 static int in_safe_group_barrier;
5397 /* Update rws_sum for REGNO, which is being written by the current instruction,
5398 with predicate PRED, and associated register flags in FLAGS. */
5400 static void
5401 rws_update (int regno, struct reg_flags flags, int pred)
5403 if (pred)
5404 rws_sum[regno].write_count++;
5405 else
5406 rws_sum[regno].write_count = 2;
5407 rws_sum[regno].written_by_fp |= flags.is_fp;
5408 /* ??? Not tracking and/or across differing predicates. */
5409 rws_sum[regno].written_by_and = flags.is_and;
5410 rws_sum[regno].written_by_or = flags.is_or;
5411 rws_sum[regno].first_pred = pred;
5414 /* Handle an access to register REGNO of type FLAGS using predicate register
5415 PRED. Update rws_sum array. Return 1 if this access creates
5416 a dependency with an earlier instruction in the same group. */
5418 static int
5419 rws_access_regno (int regno, struct reg_flags flags, int pred)
5421 int need_barrier = 0;
5423 gcc_assert (regno < NUM_REGS);
5425 if (! PR_REGNO_P (regno))
5426 flags.is_and = flags.is_or = 0;
5428 if (flags.is_write)
5430 int write_count;
5432 rws_insn_set (regno);
5433 write_count = rws_sum[regno].write_count;
5435 switch (write_count)
5437 case 0:
5438 /* The register has not been written yet. */
5439 if (!in_safe_group_barrier)
5440 rws_update (regno, flags, pred);
5441 break;
5443 case 1:
5444 /* The register has been written via a predicate. If this is
5445 not a complementary predicate, then we need a barrier. */
5446 /* ??? This assumes that P and P+1 are always complementary
5447 predicates for P even. */
5448 if (flags.is_and && rws_sum[regno].written_by_and)
5450 else if (flags.is_or && rws_sum[regno].written_by_or)
5452 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5453 need_barrier = 1;
5454 if (!in_safe_group_barrier)
5455 rws_update (regno, flags, pred);
5456 break;
5458 case 2:
5459 /* The register has been unconditionally written already. We
5460 need a barrier. */
5461 if (flags.is_and && rws_sum[regno].written_by_and)
5463 else if (flags.is_or && rws_sum[regno].written_by_or)
5465 else
5466 need_barrier = 1;
5467 if (!in_safe_group_barrier)
5469 rws_sum[regno].written_by_and = flags.is_and;
5470 rws_sum[regno].written_by_or = flags.is_or;
5472 break;
5474 default:
5475 gcc_unreachable ();
5478 else
5480 if (flags.is_branch)
5482 /* Branches have several RAW exceptions that allow us to avoid
5483 barriers. */
5485 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5486 /* RAW dependencies on branch regs are permissible as long
5487 as the writer is a non-branch instruction. Since we
5488 never generate code that uses a branch register written
5489 by a branch instruction, handling this case is
5490 easy. */
5491 return 0;
5493 if (REGNO_REG_CLASS (regno) == PR_REGS
5494 && ! rws_sum[regno].written_by_fp)
5495 /* The predicates of a branch are available within the
5496 same insn group as long as the predicate was written by
5497 something other than a floating-point instruction. */
5498 return 0;
5501 if (flags.is_and && rws_sum[regno].written_by_and)
5502 return 0;
5503 if (flags.is_or && rws_sum[regno].written_by_or)
5504 return 0;
5506 switch (rws_sum[regno].write_count)
5508 case 0:
5509 /* The register has not been written yet. */
5510 break;
5512 case 1:
5513 /* The register has been written via a predicate. If this is
5514 not a complementary predicate, then we need a barrier. */
5515 /* ??? This assumes that P and P+1 are always complementary
5516 predicates for P even. */
5517 if ((rws_sum[regno].first_pred ^ 1) != pred)
5518 need_barrier = 1;
5519 break;
5521 case 2:
5522 /* The register has been unconditionally written already. We
5523 need a barrier. */
5524 need_barrier = 1;
5525 break;
5527 default:
5528 gcc_unreachable ();
5532 return need_barrier;
5535 static int
5536 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5538 int regno = REGNO (reg);
5539 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5541 if (n == 1)
5542 return rws_access_regno (regno, flags, pred);
5543 else
5545 int need_barrier = 0;
5546 while (--n >= 0)
5547 need_barrier |= rws_access_regno (regno + n, flags, pred);
5548 return need_barrier;
5552 /* Examine X, which is a SET rtx, and update the register flags stored in
5553 *PFLAGS to reflect the kind of value the SET source computes. */
5555 static void
5556 update_set_flags (rtx x, struct reg_flags *pflags)
5558 rtx src = SET_SRC (x);
5560 switch (GET_CODE (src))
5562 case CALL:
5563 return;
5565 case IF_THEN_ELSE:
5566 /* There are four cases here:
5567 (1) The destination is (pc), in which case this is a branch,
5568 nothing here applies.
5569 (2) The destination is ar.lc, in which case this is a
5570 doloop_end_internal,
5571 (3) The destination is an fp register, in which case this is
5572 an fselect instruction.
5573 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5574 this is a check load.
5575 In all cases, nothing we do in this function applies. */
5576 return;
5578 default:
5579 if (COMPARISON_P (src)
5580 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5581 /* Set pflags->is_fp to 1 so that we know we're dealing
5582 with a floating point comparison when processing the
5583 destination of the SET. */
5584 pflags->is_fp = 1;
5586 /* Discover if this is a parallel comparison. We only handle
5587 and.orcm and or.andcm at present, since we must retain a
5588 strict inverse on the predicate pair. */
5589 else if (GET_CODE (src) == AND)
5590 pflags->is_and = 1;
5591 else if (GET_CODE (src) == IOR)
5592 pflags->is_or = 1;
5594 break;
5598 /* Subroutine of rtx_needs_barrier; this function determines whether the
5599 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5600 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5601 for this insn. */
5603 static int
5604 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5606 int need_barrier = 0;
5607 rtx dst;
5608 rtx src = SET_SRC (x);
5610 if (GET_CODE (src) == CALL)
5611 /* We don't need to worry about the result registers that
5612 get written by subroutine call. */
5613 return rtx_needs_barrier (src, flags, pred);
5614 else if (SET_DEST (x) == pc_rtx)
5616 /* X is a conditional branch. */
5617 /* ??? This seems redundant, as the caller sets this bit for
5618 all JUMP_INSNs. */
5619 if (!ia64_spec_check_src_p (src))
5620 flags.is_branch = 1;
5621 return rtx_needs_barrier (src, flags, pred);
5624 if (ia64_spec_check_src_p (src))
5625 /* Avoid checking one register twice (in condition
5626 and in 'then' section) for ldc pattern. */
5628 gcc_assert (REG_P (XEXP (src, 2)));
5629 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5631 /* We process MEM below. */
5632 src = XEXP (src, 1);
5635 need_barrier |= rtx_needs_barrier (src, flags, pred);
5637 dst = SET_DEST (x);
5638 if (GET_CODE (dst) == ZERO_EXTRACT)
5640 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5641 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5643 return need_barrier;
5646 /* Handle an access to rtx X of type FLAGS using predicate register
5647 PRED. Return 1 if this access creates a dependency with an earlier
5648 instruction in the same group. */
5650 static int
5651 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5653 int i, j;
5654 int is_complemented = 0;
5655 int need_barrier = 0;
5656 const char *format_ptr;
5657 struct reg_flags new_flags;
5658 rtx cond;
5660 if (! x)
5661 return 0;
5663 new_flags = flags;
5665 switch (GET_CODE (x))
5667 case SET:
5668 update_set_flags (x, &new_flags);
5669 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5670 if (GET_CODE (SET_SRC (x)) != CALL)
5672 new_flags.is_write = 1;
5673 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5675 break;
5677 case CALL:
5678 new_flags.is_write = 0;
5679 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5681 /* Avoid multiple register writes, in case this is a pattern with
5682 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5683 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
5685 new_flags.is_write = 1;
5686 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5687 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5688 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5690 break;
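    /* Note on the bookkeeping in the COND_EXEC case below: a predicated
       access is recorded against the REGNO of the guarding predicate
       register, and an access under the inverted guard (an EQ test) is
       recorded as that REGNO plus one.  This is an internal encoding for
       the RWS tables and appears to assume that a predicate and its
       complement occupy an adjacent register pair; it is not a statement
       about hardware numbering.  */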
5692 case COND_EXEC:
5693 /* X is a predicated instruction. */
5695 cond = COND_EXEC_TEST (x);
5696 gcc_assert (!pred);
5697 need_barrier = rtx_needs_barrier (cond, flags, 0);
5699 if (GET_CODE (cond) == EQ)
5700 is_complemented = 1;
5701 cond = XEXP (cond, 0);
5702 gcc_assert (GET_CODE (cond) == REG
5703 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5704 pred = REGNO (cond);
5705 if (is_complemented)
5706 ++pred;
5708 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5709 return need_barrier;
5711 case CLOBBER:
5712 case USE:
5713 /* Clobber & use are for earlier compiler-phases only. */
5714 break;
5716 case ASM_OPERANDS:
5717 case ASM_INPUT:
5718 /* We always emit stop bits for traditional asms. We emit stop bits
5719 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5720 if (GET_CODE (x) != ASM_OPERANDS
5721 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5723 /* Avoid writing the register multiple times if we have multiple
5724 asm outputs. This avoids a failure in rws_access_reg. */
5725 if (! rws_insn_test (REG_VOLATILE))
5727 new_flags.is_write = 1;
5728 rws_access_regno (REG_VOLATILE, new_flags, pred);
5730 return 1;
5733 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5734 We cannot just fall through here since then we would be confused
5735 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
5736 usage, does not indicate a traditional asm. */
5738 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5739 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5740 need_barrier = 1;
5741 break;
5743 case PARALLEL:
5744 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5746 rtx pat = XVECEXP (x, 0, i);
5747 switch (GET_CODE (pat))
5749 case SET:
5750 update_set_flags (pat, &new_flags);
5751 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5752 break;
5754 case USE:
5755 case CALL:
5756 case ASM_OPERANDS:
5757 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5758 break;
5760 case CLOBBER:
5761 case RETURN:
5762 break;
5764 default:
5765 gcc_unreachable ();
5768 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5770 rtx pat = XVECEXP (x, 0, i);
5771 if (GET_CODE (pat) == SET)
5773 if (GET_CODE (SET_SRC (pat)) != CALL)
5775 new_flags.is_write = 1;
5776 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5777 pred);
5780 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5781 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5783 break;
5785 case SUBREG:
5786 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5787 break;
5788 case REG:
5789 if (REGNO (x) == AR_UNAT_REGNUM)
5791 for (i = 0; i < 64; ++i)
5792 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5794 else
5795 need_barrier = rws_access_reg (x, flags, pred);
5796 break;
5798 case MEM:
5799 /* Find the regs used in memory address computation. */
5800 new_flags.is_write = 0;
5801 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5802 break;
5804 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5805 case SYMBOL_REF: case LABEL_REF: case CONST:
5806 break;
5808 /* Operators with side-effects. */
5809 case POST_INC: case POST_DEC:
5810 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5812 new_flags.is_write = 0;
5813 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5814 new_flags.is_write = 1;
5815 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5816 break;
5818 case POST_MODIFY:
5819 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5821 new_flags.is_write = 0;
5822 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5823 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5824 new_flags.is_write = 1;
5825 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5826 break;
5828 /* Handle common unary and binary ops for efficiency. */
5829 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5830 case MOD: case UDIV: case UMOD: case AND: case IOR:
5831 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5832 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5833 case NE: case EQ: case GE: case GT: case LE:
5834 case LT: case GEU: case GTU: case LEU: case LTU:
5835 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5836 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5837 break;
5839 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5840 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5841 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5842 case SQRT: case FFS: case POPCOUNT:
5843 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5844 break;
5846 case VEC_SELECT:
5847 /* VEC_SELECT's second argument is a PARALLEL with integers that
5848 describe the elements selected. On ia64, those integers are
5849 always constants. Avoid walking the PARALLEL so that we don't
5850 get confused with "normal" parallels and then die. */
5851 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5852 break;
5854 case UNSPEC:
5855 switch (XINT (x, 1))
5857 case UNSPEC_LTOFF_DTPMOD:
5858 case UNSPEC_LTOFF_DTPREL:
5859 case UNSPEC_DTPREL:
5860 case UNSPEC_LTOFF_TPREL:
5861 case UNSPEC_TPREL:
5862 case UNSPEC_PRED_REL_MUTEX:
5863 case UNSPEC_PIC_CALL:
5864 case UNSPEC_MF:
5865 case UNSPEC_FETCHADD_ACQ:
5866 case UNSPEC_BSP_VALUE:
5867 case UNSPEC_FLUSHRS:
5868 case UNSPEC_BUNDLE_SELECTOR:
5869 break;
5871 case UNSPEC_GR_SPILL:
5872 case UNSPEC_GR_RESTORE:
5874 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5875 HOST_WIDE_INT bit = (offset >> 3) & 63;
5877 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5878 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5879 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5880 new_flags, pred);
5881 break;
5884 case UNSPEC_FR_SPILL:
5885 case UNSPEC_FR_RESTORE:
5886 case UNSPEC_GETF_EXP:
5887 case UNSPEC_SETF_EXP:
5888 case UNSPEC_ADDP4:
5889 case UNSPEC_FR_SQRT_RECIP_APPROX:
5890 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
5891 case UNSPEC_LDA:
5892 case UNSPEC_LDS:
5893 case UNSPEC_LDSA:
5894 case UNSPEC_CHKACLR:
5895 case UNSPEC_CHKS:
5896 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5897 break;
5899 case UNSPEC_FR_RECIP_APPROX:
5900 case UNSPEC_SHRP:
5901 case UNSPEC_COPYSIGN:
5902 case UNSPEC_FR_RECIP_APPROX_RES:
5903 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5904 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5905 break;
5907 case UNSPEC_CMPXCHG_ACQ:
5908 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5909 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5910 break;
5912 default:
5913 gcc_unreachable ();
5915 break;
5917 case UNSPEC_VOLATILE:
5918 switch (XINT (x, 1))
5920 case UNSPECV_ALLOC:
5921 /* Alloc must always be the first instruction of a group.
5922 We force this by always returning true. */
5923 /* ??? We might get better scheduling if we explicitly check for
5924 input/local/output register dependencies, and modify the
5925 scheduler so that alloc is always reordered to the start of
5926 the current group. We could then eliminate all of the
5927 first_instruction code. */
5928 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5930 new_flags.is_write = 1;
5931 rws_access_regno (REG_AR_CFM, new_flags, pred);
5932 return 1;
5934 case UNSPECV_SET_BSP:
5935 need_barrier = 1;
5936 break;
5938 case UNSPECV_BLOCKAGE:
5939 case UNSPECV_INSN_GROUP_BARRIER:
5940 case UNSPECV_BREAK:
5941 case UNSPECV_PSAC_ALL:
5942 case UNSPECV_PSAC_NORMAL:
5943 return 0;
5945 default:
5946 gcc_unreachable ();
5948 break;
5950 case RETURN:
5951 new_flags.is_write = 0;
5952 need_barrier = rws_access_regno (REG_RP, flags, pred);
5953 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5955 new_flags.is_write = 1;
5956 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5957 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5958 break;
5960 default:
5961 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5962 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5963 switch (format_ptr[i])
5965 case '0': /* unused field */
5966 case 'i': /* integer */
5967 case 'n': /* note */
5968 case 'w': /* wide integer */
5969 case 's': /* pointer to string */
5970 case 'S': /* optional pointer to string */
5971 break;
5973 case 'e':
5974 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5975 need_barrier = 1;
5976 break;
5978 case 'E':
5979 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5980 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5981 need_barrier = 1;
5982 break;
5984 default:
5985 gcc_unreachable ();
5987 break;
5989 return need_barrier;
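/* To make the RWS machinery above concrete: on ia64 an instruction group
   is terminated by a stop bit, written ";;" in the assembly output, and
   within one group the architecture forbids (with a few exceptions)
   reading or rewriting a register written earlier in the same group.
   A sketch, not output produced by this function itself:

	add r14 = r15, r16 ;;	// stop bit (";;") ends the group
	add r17 = r14, r18	// OK: r14 is read in a new group

   Without the ";;", the read of r14 would be exactly the kind of
   dependency violation rtx_needs_barrier is meant to detect.  */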
5992 /* Clear out the state for group_barrier_needed at the start of a
5993 sequence of insns. */
5995 static void
5996 init_insn_group_barriers (void)
5998 memset (rws_sum, 0, sizeof (rws_sum));
5999 first_instruction = 1;
6002 /* Given the current state, determine whether a group barrier (a stop bit) is
6003 necessary before INSN. Return nonzero if so. This modifies the state to
6004 include the effects of INSN as a side-effect. */
6006 static int
6007 group_barrier_needed (rtx insn)
6009 rtx pat;
6010 int need_barrier = 0;
6011 struct reg_flags flags;
6013 memset (&flags, 0, sizeof (flags));
6014 switch (GET_CODE (insn))
6016 case NOTE:
6017 break;
6019 case BARRIER:
6020 /* A barrier doesn't imply an instruction group boundary. */
6021 break;
6023 case CODE_LABEL:
6024 memset (rws_insn, 0, sizeof (rws_insn));
6025 return 1;
6027 case CALL_INSN:
6028 flags.is_branch = 1;
6029 flags.is_sibcall = SIBLING_CALL_P (insn);
6030 memset (rws_insn, 0, sizeof (rws_insn));
6032 /* Don't bundle a call following another call. */
6033 if ((pat = prev_active_insn (insn))
6034 && GET_CODE (pat) == CALL_INSN)
6036 need_barrier = 1;
6037 break;
6040 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6041 break;
6043 case JUMP_INSN:
6044 if (!ia64_spec_check_p (insn))
6045 flags.is_branch = 1;
6047 /* Don't bundle a jump following a call. */
6048 if ((pat = prev_active_insn (insn))
6049 && GET_CODE (pat) == CALL_INSN)
6051 need_barrier = 1;
6052 break;
6054 /* FALLTHRU */
6056 case INSN:
6057 if (GET_CODE (PATTERN (insn)) == USE
6058 || GET_CODE (PATTERN (insn)) == CLOBBER)
6059 /* Don't care about USE and CLOBBER "insns"---those are used to
6060 indicate to the optimizer that it shouldn't get rid of
6061 certain operations. */
6062 break;
6064 pat = PATTERN (insn);
6066 /* Ug. Hack hacks hacked elsewhere. */
6067 switch (recog_memoized (insn))
6069 /* We play dependency tricks with the epilogue in order
6070 to get proper schedules. Undo this for dv analysis. */
6071 case CODE_FOR_epilogue_deallocate_stack:
6072 case CODE_FOR_prologue_allocate_stack:
6073 pat = XVECEXP (pat, 0, 0);
6074 break;
6076 /* The pattern we use for br.cloop confuses the code above.
6077 The second element of the vector is representative. */
6078 case CODE_FOR_doloop_end_internal:
6079 pat = XVECEXP (pat, 0, 1);
6080 break;
6082 /* Doesn't generate code. */
6083 case CODE_FOR_pred_rel_mutex:
6084 case CODE_FOR_prologue_use:
6085 return 0;
6087 default:
6088 break;
6091 memset (rws_insn, 0, sizeof (rws_insn));
6092 need_barrier = rtx_needs_barrier (pat, flags, 0);
6094 /* Check to see if the previous instruction was a volatile
6095 asm. */
6096 if (! need_barrier)
6097 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6098 break;
6100 default:
6101 gcc_unreachable ();
6104 if (first_instruction && INSN_P (insn)
6105 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6106 && GET_CODE (PATTERN (insn)) != USE
6107 && GET_CODE (PATTERN (insn)) != CLOBBER)
6109 need_barrier = 0;
6110 first_instruction = 0;
6113 return need_barrier;
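/* Note on the first_instruction special case above: the first real insn
   seen after the state was cleared never gets a stop bit forced in front
   of it, since there is nothing earlier in the group for it to conflict
   with; ITANIUM_CLASS_IGNORE insns and USE/CLOBBER patterns do not use up
   that exemption.  */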
6116 /* Like group_barrier_needed, but do not clobber the current state. */
6118 static int
6119 safe_group_barrier_needed (rtx insn)
6121 int saved_first_instruction;
6122 int t;
6124 saved_first_instruction = first_instruction;
6125 in_safe_group_barrier = 1;
6127 t = group_barrier_needed (insn);
6129 first_instruction = saved_first_instruction;
6130 in_safe_group_barrier = 0;
6132 return t;
6135 /* Scan the current function and insert stop bits as necessary to
6136 eliminate dependencies. This function assumes that a final
6137 instruction scheduling pass has been run which has already
6138 inserted most of the necessary stop bits. This function only
6139 inserts new ones at basic block boundaries, since these are
6140 invisible to the scheduler. */
6142 static void
6143 emit_insn_group_barriers (FILE *dump)
6145 rtx insn;
6146 rtx last_label = 0;
6147 int insns_since_last_label = 0;
6149 init_insn_group_barriers ();
6151 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6153 if (GET_CODE (insn) == CODE_LABEL)
6155 if (insns_since_last_label)
6156 last_label = insn;
6157 insns_since_last_label = 0;
6159 else if (GET_CODE (insn) == NOTE
6160 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6162 if (insns_since_last_label)
6163 last_label = insn;
6164 insns_since_last_label = 0;
6166 else if (GET_CODE (insn) == INSN
6167 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6168 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6170 init_insn_group_barriers ();
6171 last_label = 0;
6173 else if (INSN_P (insn))
6175 insns_since_last_label = 1;
6177 if (group_barrier_needed (insn))
6179 if (last_label)
6181 if (dump)
6182 fprintf (dump, "Emitting stop before label %d\n",
6183 INSN_UID (last_label));
6184 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6185 insn = last_label;
6187 init_insn_group_barriers ();
6188 last_label = 0;
6195 /* Like emit_insn_group_barriers, but run when no final scheduling pass has
6196 been run. This function has to emit all necessary group barriers. */
6198 static void
6199 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6201 rtx insn;
6203 init_insn_group_barriers ();
6205 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6207 if (GET_CODE (insn) == BARRIER)
6209 rtx last = prev_active_insn (insn);
6211 if (! last)
6212 continue;
6213 if (GET_CODE (last) == JUMP_INSN
6214 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6215 last = prev_active_insn (last);
6216 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6217 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6219 init_insn_group_barriers ();
6221 else if (INSN_P (insn))
6223 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6224 init_insn_group_barriers ();
6225 else if (group_barrier_needed (insn))
6227 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6228 init_insn_group_barriers ();
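	  /* Re-run group_barrier_needed on INSN so that its own register
	     reads and writes are recorded in the state that was just
	     reset; the return value is intentionally ignored here.  */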
6229 group_barrier_needed (insn);
6237 /* Instruction scheduling support. */
6239 #define NR_BUNDLES 10
6241 /* A list of names of all available bundles. */
6243 static const char *bundle_name [NR_BUNDLES] =
6245 ".mii",
6246 ".mmi",
6247 ".mfi",
6248 ".mmf",
6249 #if NR_BUNDLES == 10
6250 ".bbb",
6251 ".mbb",
6252 #endif
6253 ".mib",
6254 ".mmb",
6255 ".mfb",
6256 ".mlx"
6259 /* Nonzero if we should insert stop bits into the schedule. */
6261 int ia64_final_schedule = 0;
6263 /* Codes of the corresponding queried units: */
6265 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6266 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6268 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6269 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6271 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6273 /* The following variable value is an insn group barrier. */
6275 static rtx dfa_stop_insn;
6277 /* The following variable value is the last issued insn. */
6279 static rtx last_scheduled_insn;
6281 /* The following variable value is the size of the DFA state. */
6283 static size_t dfa_state_size;
6285 /* The following variable value is a pointer to a DFA state used as
6286 a temporary variable. */
6288 static state_t temp_dfa_state = NULL;
6290 /* The following variable value is the DFA state after issuing the last
6291 insn. */
6293 static state_t prev_cycle_state = NULL;
6295 /* The following array element values are TRUE if the corresponding
6296 insn requires stop bits to be added before it. */
6298 static char *stops_p = NULL;
6300 /* The following array element values are ZERO for non-speculative
6301 instructions and hold the corresponding speculation check number for
6302 speculative instructions. */
6303 static int *spec_check_no = NULL;
6305 /* Size of spec_check_no array. */
6306 static int max_uid = 0;
6308 /* The following variable is used to set up the array mentioned above. */
6310 static int stop_before_p = 0;
6312 /* The following variable value is the length of the arrays `clocks' and
6313 `add_cycles'. */
6315 static int clocks_length;
6317 /* The following array element values are cycles on which the
6318 corresponding insn will be issued. The array is used only for
6319 Itanium1. */
6321 static int *clocks;
6323 /* The following array element values are the numbers of cycles that should
6324 be added to improve insn scheduling for MM_insns for Itanium 1. */
6326 static int *add_cycles;
6328 /* The following variable value is the number of data speculations in progress. */
6329 static int pending_data_specs = 0;
6331 static rtx ia64_single_set (rtx);
6332 static void ia64_emit_insn_before (rtx, rtx);
6334 /* Map a bundle number to its pseudo-op. */
6336 const char *
6337 get_bundle_name (int b)
6339 return bundle_name[b];
6343 /* Return the maximum number of instructions a cpu can issue. */
6345 static int
6346 ia64_issue_rate (void)
6348 return 6;
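/* Six corresponds to two full bundles of three slots each, which is what
   the Itanium processors targeted here can disperse per clock.  */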
6351 /* Helper function - like single_set, but look inside COND_EXEC. */
6353 static rtx
6354 ia64_single_set (rtx insn)
6356 rtx x = PATTERN (insn), ret;
6357 if (GET_CODE (x) == COND_EXEC)
6358 x = COND_EXEC_CODE (x);
6359 if (GET_CODE (x) == SET)
6360 return x;
6362 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6363 Although they are not classical single set, the second set is there just
6364 to protect it from moving past FP-relative stack accesses. */
6365 switch (recog_memoized (insn))
6367 case CODE_FOR_prologue_allocate_stack:
6368 case CODE_FOR_epilogue_deallocate_stack:
6369 ret = XVECEXP (x, 0, 0);
6370 break;
6372 default:
6373 ret = single_set_2 (insn, x);
6374 break;
6377 return ret;
6380 /* Adjust the cost of a scheduling dependency. Return the new cost of
6381 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6383 static int
6384 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6386 enum attr_itanium_class dep_class;
6387 enum attr_itanium_class insn_class;
6389 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6390 return cost;
6392 insn_class = ia64_safe_itanium_class (insn);
6393 dep_class = ia64_safe_itanium_class (dep_insn);
6394 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6395 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6396 return 0;
6398 return cost;
6401 /* Like emit_insn_before, but skip cycle_display notes.
6402 ??? When cycle display notes are implemented, update this. */
6404 static void
6405 ia64_emit_insn_before (rtx insn, rtx before)
6407 emit_insn_before (insn, before);
6410 /* The following function marks insns that produce addresses for load
6411 and store insns. Such insns will be placed into M slots because that
6412 decreases latency for Itanium 1 (see function
6413 `ia64_produce_address_p' and the DFA descriptions). */
6415 static void
6416 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6418 rtx insn, next, next_tail;
6420 /* Before reload, which_alternative is not set, which means that
6421 ia64_safe_itanium_class will produce wrong results for (at least)
6422 move instructions. */
6423 if (!reload_completed)
6424 return;
6426 next_tail = NEXT_INSN (tail);
6427 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6428 if (INSN_P (insn))
6429 insn->call = 0;
6430 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6431 if (INSN_P (insn)
6432 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6434 sd_iterator_def sd_it;
6435 dep_t dep;
6436 bool has_mem_op_consumer_p = false;
6438 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6440 enum attr_itanium_class c;
6442 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6443 continue;
6445 next = DEP_CON (dep);
6446 c = ia64_safe_itanium_class (next);
6447 if ((c == ITANIUM_CLASS_ST
6448 || c == ITANIUM_CLASS_STF)
6449 && ia64_st_address_bypass_p (insn, next))
6451 has_mem_op_consumer_p = true;
6452 break;
6454 else if ((c == ITANIUM_CLASS_LD
6455 || c == ITANIUM_CLASS_FLD
6456 || c == ITANIUM_CLASS_FLDP)
6457 && ia64_ld_address_bypass_p (insn, next))
6459 has_mem_op_consumer_p = true;
6460 break;
6464 insn->call = has_mem_op_consumer_p;
6468 /* We're beginning a new block. Initialize data structures as necessary. */
6470 static void
6471 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6472 int sched_verbose ATTRIBUTE_UNUSED,
6473 int max_ready ATTRIBUTE_UNUSED)
6475 #ifdef ENABLE_CHECKING
6476 rtx insn;
6478 if (reload_completed)
6479 for (insn = NEXT_INSN (current_sched_info->prev_head);
6480 insn != current_sched_info->next_tail;
6481 insn = NEXT_INSN (insn))
6482 gcc_assert (!SCHED_GROUP_P (insn));
6483 #endif
6484 last_scheduled_insn = NULL_RTX;
6485 init_insn_group_barriers ();
6488 /* We're beginning a scheduling pass. Check assertion. */
6490 static void
6491 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6492 int sched_verbose ATTRIBUTE_UNUSED,
6493 int max_ready ATTRIBUTE_UNUSED)
6495 gcc_assert (!pending_data_specs);
6498 /* Scheduling pass is now finished. Free/reset static variable. */
6499 static void
6500 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6501 int sched_verbose ATTRIBUTE_UNUSED)
6503 free (spec_check_no);
6504 spec_check_no = 0;
6505 max_uid = 0;
6508 /* We are about to begin issuing insns for this clock cycle.
6509 Override the default sort algorithm to better slot instructions. */
6511 static int
6512 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6513 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6514 int reorder_type)
6516 int n_asms;
6517 int n_ready = *pn_ready;
6518 rtx *e_ready = ready + n_ready;
6519 rtx *insnp;
6521 if (sched_verbose)
6522 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6524 if (reorder_type == 0)
6526 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6527 n_asms = 0;
6528 for (insnp = ready; insnp < e_ready; insnp++)
6529 if (insnp < e_ready)
6531 rtx insn = *insnp;
6532 enum attr_type t = ia64_safe_type (insn);
6533 if (t == TYPE_UNKNOWN)
6535 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6536 || asm_noperands (PATTERN (insn)) >= 0)
6538 rtx lowest = ready[n_asms];
6539 ready[n_asms] = insn;
6540 *insnp = lowest;
6541 n_asms++;
6543 else
6545 rtx highest = ready[n_ready - 1];
6546 ready[n_ready - 1] = insn;
6547 *insnp = highest;
6548 return 1;
6553 if (n_asms < n_ready)
6555 /* Some normal insns to process. Skip the asms. */
6556 ready += n_asms;
6557 n_ready -= n_asms;
6559 else if (n_ready > 0)
6560 return 1;
6563 if (ia64_final_schedule)
6565 int deleted = 0;
6566 int nr_need_stop = 0;
6568 for (insnp = ready; insnp < e_ready; insnp++)
6569 if (safe_group_barrier_needed (*insnp))
6570 nr_need_stop++;
6572 if (reorder_type == 1 && n_ready == nr_need_stop)
6573 return 0;
6574 if (reorder_type == 0)
6575 return 1;
6576 insnp = e_ready;
6577 /* Move down everything that needs a stop bit, preserving
6578 relative order. */
6579 while (insnp-- > ready + deleted)
6580 while (insnp >= ready + deleted)
6582 rtx insn = *insnp;
6583 if (! safe_group_barrier_needed (insn))
6584 break;
6585 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6586 *ready = insn;
6587 deleted++;
6589 n_ready -= deleted;
6590 ready += deleted;
6593 return 1;
6596 /* We are about to begin issuing insns for this clock cycle. Override
6597 the default sort algorithm to better slot instructions. */
6599 static int
6600 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6601 int clock_var)
6603 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6604 pn_ready, clock_var, 0);
6607 /* Like ia64_sched_reorder, but called after issuing each insn.
6608 Override the default sort algorithm to better slot instructions. */
6610 static int
6611 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6612 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6613 int *pn_ready, int clock_var)
6615 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6616 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6617 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6618 clock_var, 1);
6621 /* We are about to issue INSN. Return the number of insns left on the
6622 ready queue that can be issued this cycle. */
6624 static int
6625 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6626 int sched_verbose ATTRIBUTE_UNUSED,
6627 rtx insn ATTRIBUTE_UNUSED,
6628 int can_issue_more ATTRIBUTE_UNUSED)
6630 if (current_sched_info->flags & DO_SPECULATION)
6631 /* Modulo scheduling does not extend h_i_d when emitting
6632 new instructions. Deal with it. */
6634 if (DONE_SPEC (insn) & BEGIN_DATA)
6635 pending_data_specs++;
6636 if (CHECK_SPEC (insn) & BEGIN_DATA)
6637 pending_data_specs--;
6640 last_scheduled_insn = insn;
6641 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6642 if (reload_completed)
6644 int needed = group_barrier_needed (insn);
6646 gcc_assert (!needed);
6647 if (GET_CODE (insn) == CALL_INSN)
6648 init_insn_group_barriers ();
6649 stops_p [INSN_UID (insn)] = stop_before_p;
6650 stop_before_p = 0;
6652 return 1;
6655 /* We are choosing insn from the ready queue. Return nonzero if INSN
6656 can be chosen. */
6658 static int
6659 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6661 gcc_assert (insn && INSN_P (insn));
6662 return ((!reload_completed
6663 || !safe_group_barrier_needed (insn))
6664 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn));
6667 /* We are choosing insn from the ready queue. Return nonzero if INSN
6668 can be chosen. */
6670 static bool
6671 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
6673 gcc_assert (insn && INSN_P (insn));
6674 /* The ALAT has 32 entries. Since we perform conservative data speculation,
6675 we keep the ALAT half-empty. */
6676 return (pending_data_specs < 16
6677 || !(TODO_SPEC (insn) & BEGIN_DATA));
6680 /* The following variable value is a pseudo-insn used by the DFA insn
6681 scheduler to change the DFA state when the simulated clock is
6682 increased. */
6684 static rtx dfa_pre_cycle_insn;
6686 /* We are about to begin issuing INSN. Return nonzero if we cannot
6687 issue it on the given cycle CLOCK, and return zero if we should not sort
6688 the ready queue on the next clock start. */
6690 static int
6691 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6692 int clock, int *sort_p)
6694 int setup_clocks_p = FALSE;
6696 gcc_assert (insn && INSN_P (insn));
6697 if ((reload_completed && safe_group_barrier_needed (insn))
6698 || (last_scheduled_insn
6699 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6700 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6701 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6703 init_insn_group_barriers ();
6704 if (verbose && dump)
6705 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6706 last_clock == clock ? " + cycle advance" : "");
6707 stop_before_p = 1;
6708 if (last_clock == clock)
6710 state_transition (curr_state, dfa_stop_insn);
6711 if (TARGET_EARLY_STOP_BITS)
6712 *sort_p = (last_scheduled_insn == NULL_RTX
6713 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6714 else
6715 *sort_p = 0;
6716 return 1;
6718 else if (reload_completed)
6719 setup_clocks_p = TRUE;
6720 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6721 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6722 state_reset (curr_state);
6723 else
6725 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6726 state_transition (curr_state, dfa_stop_insn);
6727 state_transition (curr_state, dfa_pre_cycle_insn);
6728 state_transition (curr_state, NULL);
6731 else if (reload_completed)
6732 setup_clocks_p = TRUE;
6733 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6734 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6735 && asm_noperands (PATTERN (insn)) < 0)
6737 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6739 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6741 sd_iterator_def sd_it;
6742 dep_t dep;
6743 int d = -1;
6745 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6746 if (DEP_TYPE (dep) == REG_DEP_TRUE)
6748 enum attr_itanium_class dep_class;
6749 rtx dep_insn = DEP_PRO (dep);
6751 dep_class = ia64_safe_itanium_class (dep_insn);
6752 if ((dep_class == ITANIUM_CLASS_MMMUL
6753 || dep_class == ITANIUM_CLASS_MMSHF)
6754 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6755 && (d < 0
6756 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6757 d = last_clock - clocks [INSN_UID (dep_insn)];
6759 if (d >= 0)
6760 add_cycles [INSN_UID (insn)] = 3 - d;
6763 return 0;
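/* A descriptive note on the Itanium 1 specific block above: when INSN has
   a true dependence on an MM multiply/shift producer issued fewer than
   four cycles earlier, add_cycles records how many extra cycles (3 - d)
   the bundling pass should later insert to respect that latency.  */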
6766 /* Implement targetm.sched.h_i_d_extended hook.
6767 Extend internal data structures. */
6768 static void
6769 ia64_h_i_d_extended (void)
6771 if (current_sched_info->flags & DO_SPECULATION)
6773 int new_max_uid = get_max_uid () + 1;
6775 spec_check_no = xrecalloc (spec_check_no, new_max_uid,
6776 max_uid, sizeof (*spec_check_no));
6777 max_uid = new_max_uid;
6780 if (stops_p != NULL)
6782 int new_clocks_length = get_max_uid () + 1;
6784 stops_p = xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6786 if (ia64_tune == PROCESSOR_ITANIUM)
6788 clocks = xrecalloc (clocks, new_clocks_length, clocks_length,
6789 sizeof (int));
6790 add_cycles = xrecalloc (add_cycles, new_clocks_length, clocks_length,
6791 sizeof (int));
6794 clocks_length = new_clocks_length;
6798 /* Constants that help map 'enum machine_mode' to int. */
6799 enum SPEC_MODES
6801 SPEC_MODE_INVALID = -1,
6802 SPEC_MODE_FIRST = 0,
6803 SPEC_MODE_FOR_EXTEND_FIRST = 1,
6804 SPEC_MODE_FOR_EXTEND_LAST = 3,
6805 SPEC_MODE_LAST = 8
6808 /* Return index of the MODE. */
6809 static int
6810 ia64_mode_to_int (enum machine_mode mode)
6812 switch (mode)
6814 case BImode: return 0; /* SPEC_MODE_FIRST */
6815 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
6816 case HImode: return 2;
6817 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
6818 case DImode: return 4;
6819 case SFmode: return 5;
6820 case DFmode: return 6;
6821 case XFmode: return 7;
6822 case TImode:
6823 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
6824 mentioned in itanium[12].md. Predicate fp_register_operand also
6825 needs to be defined. Bottom line: better disable for now. */
6826 return SPEC_MODE_INVALID;
6827 default: return SPEC_MODE_INVALID;
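/* For reference, the resulting index mapping used throughout the
   speculation code is: 0=BI, 1=QI, 2=HI, 3=SI, 4=DI, 5=SF, 6=DF, 7=XF
   (TImode, index 8, is currently disabled above), with QI..SI
   (indices 1..3) being the only modes for which a ZERO_EXTEND form is
   supported.  */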
6831 /* Provide information about speculation capabilities. */
6832 static void
6833 ia64_set_sched_flags (spec_info_t spec_info)
6835 unsigned int *flags = &(current_sched_info->flags);
6837 if (*flags & SCHED_RGN
6838 || *flags & SCHED_EBB)
6840 int mask = 0;
6842 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
6843 || (mflag_sched_ar_data_spec && reload_completed))
6845 mask |= BEGIN_DATA;
6847 if ((mflag_sched_br_in_data_spec && !reload_completed)
6848 || (mflag_sched_ar_in_data_spec && reload_completed))
6849 mask |= BE_IN_DATA;
6852 if (mflag_sched_control_spec)
6854 mask |= BEGIN_CONTROL;
6856 if (mflag_sched_in_control_spec)
6857 mask |= BE_IN_CONTROL;
6860 if (mask)
6862 *flags |= USE_DEPS_LIST | DO_SPECULATION;
6864 if (mask & BE_IN_SPEC)
6865 *flags |= NEW_BBS;
6867 spec_info->mask = mask;
6868 spec_info->flags = 0;
6870 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
6871 spec_info->flags |= PREFER_NON_DATA_SPEC;
6873 if ((mask & CONTROL_SPEC)
6874 && mflag_sched_prefer_non_control_spec_insns)
6875 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
6877 if (mflag_sched_spec_verbose)
6879 if (sched_verbose >= 1)
6880 spec_info->dump = sched_dump;
6881 else
6882 spec_info->dump = stderr;
6884 else
6885 spec_info->dump = 0;
6887 if (mflag_sched_count_spec_in_critical_path)
6888 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
6893 /* Implement targetm.sched.speculate_insn hook.
6894 Check whether INSN can be TS speculative.
6895 If not, return -1.
6896 If it can, generate the speculative pattern in NEW_PAT and return 1.
6897 If the current pattern of INSN already provides TS speculation, return 0. */
6898 static int
6899 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
6901 rtx pat, reg, mem, mem_reg;
6902 int mode_no, gen_p = 1;
6903 bool extend_p;
6905 gcc_assert (!(ts & ~BEGIN_SPEC) && ts);
6907 pat = PATTERN (insn);
6909 if (GET_CODE (pat) == COND_EXEC)
6910 pat = COND_EXEC_CODE (pat);
6912 /* This should be a SET ... */
6913 if (GET_CODE (pat) != SET)
6914 return -1;
6916 reg = SET_DEST (pat);
6917 /* ... to the general/fp register ... */
6918 if (!REG_P (reg) || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
6919 return -1;
6921 /* ... from the mem ... */
6922 mem = SET_SRC (pat);
6924 /* ... that can, possibly, be a zero_extend ... */
6925 if (GET_CODE (mem) == ZERO_EXTEND)
6927 mem = XEXP (mem, 0);
6928 extend_p = true;
6930 else
6931 extend_p = false;
6933 /* ... or a speculative load. */
6934 if (GET_CODE (mem) == UNSPEC)
6936 int code;
6938 code = XINT (mem, 1);
6939 if (code != UNSPEC_LDA && code != UNSPEC_LDS && code != UNSPEC_LDSA)
6940 return -1;
6942 if ((code == UNSPEC_LDA && !(ts & BEGIN_CONTROL))
6943 || (code == UNSPEC_LDS && !(ts & BEGIN_DATA))
6944 || code == UNSPEC_LDSA)
6945 gen_p = 0;
6947 mem = XVECEXP (mem, 0, 0);
6948 gcc_assert (MEM_P (mem));
6951 /* Source should be a mem ... */
6952 if (!MEM_P (mem))
6953 return -1;
6955 /* ... addressed by a register. */
6956 mem_reg = XEXP (mem, 0);
6957 if (!REG_P (mem_reg))
6958 return -1;
6960 /* We should use MEM's mode since REG's mode in the presence of ZERO_EXTEND
6961 will always be DImode. */
6962 mode_no = ia64_mode_to_int (GET_MODE (mem));
6964 if (mode_no == SPEC_MODE_INVALID
6965 || (extend_p
6966 && !(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
6967 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST)))
6968 return -1;
6970 extract_insn_cached (insn);
6971 gcc_assert (reg == recog_data.operand[0] && mem == recog_data.operand[1]);
6973 *new_pat = ia64_gen_spec_insn (insn, ts, mode_no, gen_p != 0, extend_p);
6975 return gen_p;
6978 enum
6980 /* Offset to reach ZERO_EXTEND patterns. */
6981 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1,
6982 /* Number of patterns for each speculation mode. */
6983 SPEC_N = (SPEC_MODE_LAST
6984 + SPEC_MODE_FOR_EXTEND_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 2)
6987 enum SPEC_GEN_LD_MAP
6989 /* Offset to ld.a patterns. */
6990 SPEC_GEN_A = 0 * SPEC_N,
6991 /* Offset to ld.s patterns. */
6992 SPEC_GEN_S = 1 * SPEC_N,
6993 /* Offset to ld.sa patterns. */
6994 SPEC_GEN_SA = 2 * SPEC_N,
6995 /* Offset to ld.sa patterns for which the corresponding ld.c will
6996 mutate to chk.s. */
6997 SPEC_GEN_SA_FOR_S = 3 * SPEC_N
7000 /* These offsets are used to get (4 * SPEC_N). */
7001 enum SPEC_GEN_CHECK_OFFSET
7003 SPEC_GEN_CHKA_FOR_A_OFFSET = 4 * SPEC_N - SPEC_GEN_A,
7004 SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * SPEC_N - SPEC_GEN_SA
7007 /* If GEN_P is true, calculate the index of the needed speculation check and
7008 return the speculative pattern for INSN with speculation mode TS, machine
7009 mode MODE_NO, and with ZERO_EXTEND (if EXTEND_P is true).
7010 If GEN_P is false, just calculate the index of the needed speculation check. */
7011 static rtx
7012 ia64_gen_spec_insn (rtx insn, ds_t ts, int mode_no, bool gen_p, bool extend_p)
7014 rtx pat, new_pat;
7015 int load_no;
7016 int shift = 0;
7018 static rtx (* const gen_load[]) (rtx, rtx) = {
7019 gen_movbi_advanced,
7020 gen_movqi_advanced,
7021 gen_movhi_advanced,
7022 gen_movsi_advanced,
7023 gen_movdi_advanced,
7024 gen_movsf_advanced,
7025 gen_movdf_advanced,
7026 gen_movxf_advanced,
7027 gen_movti_advanced,
7028 gen_zero_extendqidi2_advanced,
7029 gen_zero_extendhidi2_advanced,
7030 gen_zero_extendsidi2_advanced,
7032 gen_movbi_speculative,
7033 gen_movqi_speculative,
7034 gen_movhi_speculative,
7035 gen_movsi_speculative,
7036 gen_movdi_speculative,
7037 gen_movsf_speculative,
7038 gen_movdf_speculative,
7039 gen_movxf_speculative,
7040 gen_movti_speculative,
7041 gen_zero_extendqidi2_speculative,
7042 gen_zero_extendhidi2_speculative,
7043 gen_zero_extendsidi2_speculative,
7045 gen_movbi_speculative_advanced,
7046 gen_movqi_speculative_advanced,
7047 gen_movhi_speculative_advanced,
7048 gen_movsi_speculative_advanced,
7049 gen_movdi_speculative_advanced,
7050 gen_movsf_speculative_advanced,
7051 gen_movdf_speculative_advanced,
7052 gen_movxf_speculative_advanced,
7053 gen_movti_speculative_advanced,
7054 gen_zero_extendqidi2_speculative_advanced,
7055 gen_zero_extendhidi2_speculative_advanced,
7056 gen_zero_extendsidi2_speculative_advanced,
7058 gen_movbi_speculative_advanced,
7059 gen_movqi_speculative_advanced,
7060 gen_movhi_speculative_advanced,
7061 gen_movsi_speculative_advanced,
7062 gen_movdi_speculative_advanced,
7063 gen_movsf_speculative_advanced,
7064 gen_movdf_speculative_advanced,
7065 gen_movxf_speculative_advanced,
7066 gen_movti_speculative_advanced,
7067 gen_zero_extendqidi2_speculative_advanced,
7068 gen_zero_extendhidi2_speculative_advanced,
7069 gen_zero_extendsidi2_speculative_advanced
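  /* Layout of gen_load above, following the offsets defined earlier: four
     groups of SPEC_N generators -- ld.a (advanced), ld.s (control
     speculative), ld.sa (both), and ld.sa used in place of ld.s -- each
     group listing the nine move modes followed by the three zero-extend
     variants.  load_no below is group offset + mode_no, plus
     SPEC_GEN_EXTEND_OFFSET for the extending forms.  */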
7072 load_no = extend_p ? mode_no + SPEC_GEN_EXTEND_OFFSET : mode_no;
7074 if (ts & BEGIN_DATA)
7076 /* We don't need recovery because even if this is an ld.sa, the
7077 ALAT entry will be allocated only if the NAT bit is set to zero.
7078 So it is enough to use ld.c here. */
7080 if (ts & BEGIN_CONTROL)
7082 load_no += SPEC_GEN_SA;
7084 if (!mflag_sched_ldc)
7085 shift = SPEC_GEN_CHKA_FOR_SA_OFFSET;
7087 else
7089 load_no += SPEC_GEN_A;
7091 if (!mflag_sched_ldc)
7092 shift = SPEC_GEN_CHKA_FOR_A_OFFSET;
7095 else if (ts & BEGIN_CONTROL)
7097 /* ld.sa can be used instead of ld.s to avoid basic block splitting. */
7098 if (!mflag_control_ldc)
7099 load_no += SPEC_GEN_S;
7100 else
7102 gcc_assert (mflag_sched_ldc);
7103 load_no += SPEC_GEN_SA_FOR_S;
7106 else
7107 gcc_unreachable ();
7109 /* Set the desired check index. We add 1 because a zero element in this
7110 array means that the instruction with that uid is non-speculative. */
7111 spec_check_no[INSN_UID (insn)] = load_no + shift + 1;
7113 if (!gen_p)
7114 return 0;
7116 new_pat = gen_load[load_no] (copy_rtx (recog_data.operand[0]),
7117 copy_rtx (recog_data.operand[1]));
7119 pat = PATTERN (insn);
7120 if (GET_CODE (pat) == COND_EXEC)
7121 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx
7122 (COND_EXEC_TEST (pat)), new_pat);
7124 return new_pat;
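/* In ia64 assembly terms (a summary, not new behavior): the "advanced"
   patterns selected above emit ld.a, the "speculative" ones ld.s, and the
   "speculative_advanced" ones ld.sa; the checks produced by
   ia64_gen_check below correspond to ld.c/chk.a for advanced loads and
   chk.s for control-speculative ones.  */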
7127 /* Offset to branchy checks. */
7128 enum { SPEC_GEN_CHECK_MUTATION_OFFSET = 5 * SPEC_N };
7130 /* Return nonzero if INSN needs a branchy recovery check. */
7131 static bool
7132 ia64_needs_block_p (const_rtx insn)
7134 int check_no;
7136 check_no = spec_check_no[INSN_UID(insn)] - 1;
7137 gcc_assert (0 <= check_no && check_no < SPEC_GEN_CHECK_MUTATION_OFFSET);
7139 return ((SPEC_GEN_S <= check_no && check_no < SPEC_GEN_S + SPEC_N)
7140 || (4 * SPEC_N <= check_no && check_no < 4 * SPEC_N + SPEC_N));
7143 /* Generate (or regenerate, if (MUTATE_P)) recovery check for INSN.
7144 If (LABEL != 0 || MUTATE_P), generate branchy recovery check.
7145 Otherwise, generate a simple check. */
7146 static rtx
7147 ia64_gen_check (rtx insn, rtx label, bool mutate_p)
7149 rtx op1, pat, check_pat;
7151 static rtx (* const gen_check[]) (rtx, rtx) = {
7152 gen_movbi_clr,
7153 gen_movqi_clr,
7154 gen_movhi_clr,
7155 gen_movsi_clr,
7156 gen_movdi_clr,
7157 gen_movsf_clr,
7158 gen_movdf_clr,
7159 gen_movxf_clr,
7160 gen_movti_clr,
7161 gen_zero_extendqidi2_clr,
7162 gen_zero_extendhidi2_clr,
7163 gen_zero_extendsidi2_clr,
7165 gen_speculation_check_bi,
7166 gen_speculation_check_qi,
7167 gen_speculation_check_hi,
7168 gen_speculation_check_si,
7169 gen_speculation_check_di,
7170 gen_speculation_check_sf,
7171 gen_speculation_check_df,
7172 gen_speculation_check_xf,
7173 gen_speculation_check_ti,
7174 gen_speculation_check_di,
7175 gen_speculation_check_di,
7176 gen_speculation_check_di,
7178 gen_movbi_clr,
7179 gen_movqi_clr,
7180 gen_movhi_clr,
7181 gen_movsi_clr,
7182 gen_movdi_clr,
7183 gen_movsf_clr,
7184 gen_movdf_clr,
7185 gen_movxf_clr,
7186 gen_movti_clr,
7187 gen_zero_extendqidi2_clr,
7188 gen_zero_extendhidi2_clr,
7189 gen_zero_extendsidi2_clr,
7191 gen_movbi_clr,
7192 gen_movqi_clr,
7193 gen_movhi_clr,
7194 gen_movsi_clr,
7195 gen_movdi_clr,
7196 gen_movsf_clr,
7197 gen_movdf_clr,
7198 gen_movxf_clr,
7199 gen_movti_clr,
7200 gen_zero_extendqidi2_clr,
7201 gen_zero_extendhidi2_clr,
7202 gen_zero_extendsidi2_clr,
7204 gen_advanced_load_check_clr_bi,
7205 gen_advanced_load_check_clr_qi,
7206 gen_advanced_load_check_clr_hi,
7207 gen_advanced_load_check_clr_si,
7208 gen_advanced_load_check_clr_di,
7209 gen_advanced_load_check_clr_sf,
7210 gen_advanced_load_check_clr_df,
7211 gen_advanced_load_check_clr_xf,
7212 gen_advanced_load_check_clr_ti,
7213 gen_advanced_load_check_clr_di,
7214 gen_advanced_load_check_clr_di,
7215 gen_advanced_load_check_clr_di,
7217 /* Following checks are generated during mutation. */
7218 gen_advanced_load_check_clr_bi,
7219 gen_advanced_load_check_clr_qi,
7220 gen_advanced_load_check_clr_hi,
7221 gen_advanced_load_check_clr_si,
7222 gen_advanced_load_check_clr_di,
7223 gen_advanced_load_check_clr_sf,
7224 gen_advanced_load_check_clr_df,
7225 gen_advanced_load_check_clr_xf,
7226 gen_advanced_load_check_clr_ti,
7227 gen_advanced_load_check_clr_di,
7228 gen_advanced_load_check_clr_di,
7229 gen_advanced_load_check_clr_di,
7231 0,0,0,0,0,0,0,0,0,0,0,0,
7233 gen_advanced_load_check_clr_bi,
7234 gen_advanced_load_check_clr_qi,
7235 gen_advanced_load_check_clr_hi,
7236 gen_advanced_load_check_clr_si,
7237 gen_advanced_load_check_clr_di,
7238 gen_advanced_load_check_clr_sf,
7239 gen_advanced_load_check_clr_df,
7240 gen_advanced_load_check_clr_xf,
7241 gen_advanced_load_check_clr_ti,
7242 gen_advanced_load_check_clr_di,
7243 gen_advanced_load_check_clr_di,
7244 gen_advanced_load_check_clr_di,
7246 gen_speculation_check_bi,
7247 gen_speculation_check_qi,
7248 gen_speculation_check_hi,
7249 gen_speculation_check_si,
7250 gen_speculation_check_di,
7251 gen_speculation_check_sf,
7252 gen_speculation_check_df,
7253 gen_speculation_check_xf,
7254 gen_speculation_check_ti,
7255 gen_speculation_check_di,
7256 gen_speculation_check_di,
7257 gen_speculation_check_di
7260 extract_insn_cached (insn);
7262 if (label)
7264 gcc_assert (mutate_p || ia64_needs_block_p (insn));
7265 op1 = label;
7267 else
7269 gcc_assert (!mutate_p && !ia64_needs_block_p (insn));
7270 op1 = copy_rtx (recog_data.operand[1]);
7273 if (mutate_p)
7274 /* INSN is ld.c.
7275 Find the speculation check number by searching for the original
7276 speculative load in the RESOLVED_DEPS list of INSN.
7277 As long as patterns are unique for each instruction, this can be
7278 accomplished by matching ORIG_PAT fields. */
7280 sd_iterator_def sd_it;
7281 dep_t dep;
7282 int check_no = 0;
7283 rtx orig_pat = ORIG_PAT (insn);
7285 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7287 rtx x = DEP_PRO (dep);
7289 if (ORIG_PAT (x) == orig_pat)
7290 check_no = spec_check_no[INSN_UID (x)];
7292 gcc_assert (check_no);
7294 spec_check_no[INSN_UID (insn)] = (check_no
7295 + SPEC_GEN_CHECK_MUTATION_OFFSET);
7298 check_pat = (gen_check[spec_check_no[INSN_UID (insn)] - 1]
7299 (copy_rtx (recog_data.operand[0]), op1));
7301 pat = PATTERN (insn);
7302 if (GET_CODE (pat) == COND_EXEC)
7303 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7304 check_pat);
7306 return check_pat;
7309 /* Return nonzero if X is a branchy recovery check. */
7310 static int
7311 ia64_spec_check_p (rtx x)
7313 x = PATTERN (x);
7314 if (GET_CODE (x) == COND_EXEC)
7315 x = COND_EXEC_CODE (x);
7316 if (GET_CODE (x) == SET)
7317 return ia64_spec_check_src_p (SET_SRC (x));
7318 return 0;
7321 /* Return nonzero if SRC belongs to a recovery check. */
7322 static int
7323 ia64_spec_check_src_p (rtx src)
7325 if (GET_CODE (src) == IF_THEN_ELSE)
7327 rtx t;
7329 t = XEXP (src, 0);
7330 if (GET_CODE (t) == NE)
7332 t = XEXP (t, 0);
7334 if (GET_CODE (t) == UNSPEC)
7336 int code;
7338 code = XINT (t, 1);
7340 if (code == UNSPEC_CHKACLR
7341 || code == UNSPEC_CHKS
7342 || code == UNSPEC_LDCCLR)
7344 gcc_assert (code != 0);
7345 return code;
7350 return 0;
7354 /* The following page contains abstract data `bundle states' which are
7355 used for bundling insns (inserting nops and template generation). */
7357 /* The following describes the state of insn bundling. */
7359 struct bundle_state
7361 /* Unique bundle state number to identify them in the debugging
7362 output */
7363 int unique_num;
7364 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7365 /* number of nops before and after the insn */
7366 short before_nops_num, after_nops_num;
7367 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
7368 insn) */
7369 int cost; /* cost of the state in cycles */
7370 int accumulated_insns_num; /* number of all previous insns including
7371 nops. L is considered as 2 insns */
7372 int branch_deviation; /* deviation of previous branches from 3rd slots */
7373 struct bundle_state *next; /* next state with the same insn_num */
7374 struct bundle_state *originator; /* originator (previous insn state) */
7375 /* All bundle states are in the following chain. */
7376 struct bundle_state *allocated_states_chain;
7377 /* The DFA State after issuing the insn and the nops. */
7378 state_t dfa_state;
7381 /* The following maps an insn number to the corresponding bundle state. */
7383 static struct bundle_state **index_to_bundle_states;
7385 /* The unique number of next bundle state. */
7387 static int bundle_states_num;
7389 /* All allocated bundle states are in the following chain. */
7391 static struct bundle_state *allocated_bundle_states_chain;
7393 /* All allocated but not used bundle states are in the following
7394 chain. */
7396 static struct bundle_state *free_bundle_state_chain;
7399 /* The following function returns a free bundle state. */
7401 static struct bundle_state *
7402 get_free_bundle_state (void)
7404 struct bundle_state *result;
7406 if (free_bundle_state_chain != NULL)
7408 result = free_bundle_state_chain;
7409 free_bundle_state_chain = result->next;
7411 else
7413 result = xmalloc (sizeof (struct bundle_state));
7414 result->dfa_state = xmalloc (dfa_state_size);
7415 result->allocated_states_chain = allocated_bundle_states_chain;
7416 allocated_bundle_states_chain = result;
7418 result->unique_num = bundle_states_num++;
7419 return result;
7423 /* The following function frees the given bundle state. */
7425 static void
7426 free_bundle_state (struct bundle_state *state)
7428 state->next = free_bundle_state_chain;
7429 free_bundle_state_chain = state;
7432 /* Start work with abstract data `bundle states'. */
7434 static void
7435 initiate_bundle_states (void)
7437 bundle_states_num = 0;
7438 free_bundle_state_chain = NULL;
7439 allocated_bundle_states_chain = NULL;
7442 /* Finish work with abstract data `bundle states'. */
7444 static void
7445 finish_bundle_states (void)
7447 struct bundle_state *curr_state, *next_state;
7449 for (curr_state = allocated_bundle_states_chain;
7450 curr_state != NULL;
7451 curr_state = next_state)
7453 next_state = curr_state->allocated_states_chain;
7454 free (curr_state->dfa_state);
7455 free (curr_state);
7459 /* Hash table of the bundle states. The key is dfa_state and insn_num
7460 of the bundle states. */
7462 static htab_t bundle_state_table;
7464 /* The function returns hash of BUNDLE_STATE. */
7466 static unsigned
7467 bundle_state_hash (const void *bundle_state)
7469 const struct bundle_state *const state
7470 = (const struct bundle_state *) bundle_state;
7471 unsigned result, i;
7473 for (result = i = 0; i < dfa_state_size; i++)
7474 result += (((unsigned char *) state->dfa_state) [i]
7475 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7476 return result + state->insn_num;
7479 /* The function returns nonzero if the bundle state keys are equal. */
7481 static int
7482 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7484 const struct bundle_state *const state1
7485 = (const struct bundle_state *) bundle_state_1;
7486 const struct bundle_state *const state2
7487 = (const struct bundle_state *) bundle_state_2;
7489 return (state1->insn_num == state2->insn_num
7490 && memcmp (state1->dfa_state, state2->dfa_state,
7491 dfa_state_size) == 0);
7494 /* The function inserts the BUNDLE_STATE into the hash table. The
7495 function returns nonzero if the bundle has been inserted into the
7496 table. The table contains the best bundle state with given key. */
7498 static int
7499 insert_bundle_state (struct bundle_state *bundle_state)
7501 void **entry_ptr;
7503 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7504 if (*entry_ptr == NULL)
7506 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7507 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7508 *entry_ptr = (void *) bundle_state;
7509 return TRUE;
7511 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7512 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7513 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7514 > bundle_state->accumulated_insns_num
7515 || (((struct bundle_state *)
7516 *entry_ptr)->accumulated_insns_num
7517 == bundle_state->accumulated_insns_num
7518 && ((struct bundle_state *)
7519 *entry_ptr)->branch_deviation
7520 > bundle_state->branch_deviation))))
7523 struct bundle_state temp;
7525 temp = *(struct bundle_state *) *entry_ptr;
7526 *(struct bundle_state *) *entry_ptr = *bundle_state;
7527 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7528 *bundle_state = temp;
7530 return FALSE;
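/* The replacement rule above, restated: a new state with the same
   (dfa_state, insn_num) key displaces the stored one when it has a
   smaller cycle cost, or the same cost but fewer accumulated insns, or
   the same of both but a smaller branch deviation.  */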
7533 /* Start work with the hash table. */
7535 static void
7536 initiate_bundle_state_table (void)
7538 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7539 (htab_del) 0);
7542 /* Finish work with the hash table. */
7544 static void
7545 finish_bundle_state_table (void)
7547 htab_delete (bundle_state_table);
7552 /* The following variable is an insn `nop' used to check bundle states
7553 with different numbers of inserted nops. */
7555 static rtx ia64_nop;
7557 /* The following function tries to issue NOPS_NUM nops for the current
7558 state without advancing the processor cycle. If it fails, the
7559 function returns FALSE and frees the current state. */
7561 static int
7562 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7564 int i;
7566 for (i = 0; i < nops_num; i++)
7567 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7569 free_bundle_state (curr_state);
7570 return FALSE;
7572 return TRUE;
7575 /* The following function tries to issue INSN for the current
7576 state without advancing the processor cycle. If it fails, the
7577 function returns FALSE and frees the current state. */
7579 static int
7580 try_issue_insn (struct bundle_state *curr_state, rtx insn)
7582 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
7584 free_bundle_state (curr_state);
7585 return FALSE;
7587 return TRUE;
7590 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
7591 starting with ORIGINATOR without advancing the processor cycle. If
7592 TRY_BUNDLE_END_P is TRUE, the function also/only (if
7593 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
7594 If it was successful, the function creates a new bundle state and
7595 inserts it into the hash table and into `index_to_bundle_states'. */
7597 static void
7598 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
7599 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
7601 struct bundle_state *curr_state;
7603 curr_state = get_free_bundle_state ();
7604 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
7605 curr_state->insn = insn;
7606 curr_state->insn_num = originator->insn_num + 1;
7607 curr_state->cost = originator->cost;
7608 curr_state->originator = originator;
7609 curr_state->before_nops_num = before_nops_num;
7610 curr_state->after_nops_num = 0;
7611 curr_state->accumulated_insns_num
7612 = originator->accumulated_insns_num + before_nops_num;
7613 curr_state->branch_deviation = originator->branch_deviation;
7614 gcc_assert (insn);
7615 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
7617 gcc_assert (GET_MODE (insn) != TImode);
7618 if (!try_issue_nops (curr_state, before_nops_num))
7619 return;
7620 if (!try_issue_insn (curr_state, insn))
7621 return;
7622 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
7623 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
7624 && curr_state->accumulated_insns_num % 3 != 0)
7626 free_bundle_state (curr_state);
7627 return;
7630 else if (GET_MODE (insn) != TImode)
7632 if (!try_issue_nops (curr_state, before_nops_num))
7633 return;
7634 if (!try_issue_insn (curr_state, insn))
7635 return;
7636 curr_state->accumulated_insns_num++;
7637 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
7638 && asm_noperands (PATTERN (insn)) < 0);
7640 if (ia64_safe_type (insn) == TYPE_L)
7641 curr_state->accumulated_insns_num++;
7643 else
7645 /* If this is an insn that must be first in a group, then don't allow
7646 nops to be emitted before it. Currently, alloc is the only such
7647 supported instruction. */
7648 /* ??? The bundling automatons should handle this for us, but they do
7649 not yet have support for the first_insn attribute. */
7650 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
7652 free_bundle_state (curr_state);
7653 return;
7656 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
7657 state_transition (curr_state->dfa_state, NULL);
7658 curr_state->cost++;
7659 if (!try_issue_nops (curr_state, before_nops_num))
7660 return;
7661 if (!try_issue_insn (curr_state, insn))
7662 return;
7663 curr_state->accumulated_insns_num++;
7664 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7665 || asm_noperands (PATTERN (insn)) >= 0)
7667 /* Finish bundle containing asm insn. */
7668 curr_state->after_nops_num
7669 = 3 - curr_state->accumulated_insns_num % 3;
7670 curr_state->accumulated_insns_num
7671 += 3 - curr_state->accumulated_insns_num % 3;
7673 else if (ia64_safe_type (insn) == TYPE_L)
7674 curr_state->accumulated_insns_num++;
7676 if (ia64_safe_type (insn) == TYPE_B)
7677 curr_state->branch_deviation
7678 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
7679 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
7681 if (!only_bundle_end_p && insert_bundle_state (curr_state))
7683 state_t dfa_state;
7684 struct bundle_state *curr_state1;
7685 struct bundle_state *allocated_states_chain;
7687 curr_state1 = get_free_bundle_state ();
7688 dfa_state = curr_state1->dfa_state;
7689 allocated_states_chain = curr_state1->allocated_states_chain;
7690 *curr_state1 = *curr_state;
7691 curr_state1->dfa_state = dfa_state;
7692 curr_state1->allocated_states_chain = allocated_states_chain;
7693 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
7694 dfa_state_size);
7695 curr_state = curr_state1;
7697 if (!try_issue_nops (curr_state,
7698 3 - curr_state->accumulated_insns_num % 3))
7699 return;
7700 curr_state->after_nops_num
7701 = 3 - curr_state->accumulated_insns_num % 3;
7702 curr_state->accumulated_insns_num
7703 += 3 - curr_state->accumulated_insns_num % 3;
7705 if (!insert_bundle_state (curr_state))
7706 free_bundle_state (curr_state);
7707 return;
7710 /* The following function returns the position within the two-bundle
7711 window for the given STATE. */
7713 static int
7714 get_max_pos (state_t state)
7716 if (cpu_unit_reservation_p (state, pos_6))
7717 return 6;
7718 else if (cpu_unit_reservation_p (state, pos_5))
7719 return 5;
7720 else if (cpu_unit_reservation_p (state, pos_4))
7721 return 4;
7722 else if (cpu_unit_reservation_p (state, pos_3))
7723 return 3;
7724 else if (cpu_unit_reservation_p (state, pos_2))
7725 return 2;
7726 else if (cpu_unit_reservation_p (state, pos_1))
7727 return 1;
7728 else
7729 return 0;
7732 /* The function returns the code of a possible template for the given
7733 position and state. The function should be called only with the two
7734 position values 3 or 6. We avoid generating F NOPs by putting
7735 templates containing F insns at the end of the template search,
7736 because of an undocumented anomaly in McKinley-derived cores which can
7737 cause stalls if an F-unit insn (including a NOP) is issued within a
7738 six-cycle window after reading certain application registers (such
7739 as ar.bsp). Furthermore, power considerations also argue against
7740 the use of F-unit instructions unless they're really needed. */
7742 static int
7743 get_template (state_t state, int pos)
7745 switch (pos)
7747 case 3:
7748 if (cpu_unit_reservation_p (state, _0mmi_))
7749 return 1;
7750 else if (cpu_unit_reservation_p (state, _0mii_))
7751 return 0;
7752 else if (cpu_unit_reservation_p (state, _0mmb_))
7753 return 7;
7754 else if (cpu_unit_reservation_p (state, _0mib_))
7755 return 6;
7756 else if (cpu_unit_reservation_p (state, _0mbb_))
7757 return 5;
7758 else if (cpu_unit_reservation_p (state, _0bbb_))
7759 return 4;
7760 else if (cpu_unit_reservation_p (state, _0mmf_))
7761 return 3;
7762 else if (cpu_unit_reservation_p (state, _0mfi_))
7763 return 2;
7764 else if (cpu_unit_reservation_p (state, _0mfb_))
7765 return 8;
7766 else if (cpu_unit_reservation_p (state, _0mlx_))
7767 return 9;
7768 else
7769 gcc_unreachable ();
7770 case 6:
7771 if (cpu_unit_reservation_p (state, _1mmi_))
7772 return 1;
7773 else if (cpu_unit_reservation_p (state, _1mii_))
7774 return 0;
7775 else if (cpu_unit_reservation_p (state, _1mmb_))
7776 return 7;
7777 else if (cpu_unit_reservation_p (state, _1mib_))
7778 return 6;
7779 else if (cpu_unit_reservation_p (state, _1mbb_))
7780 return 5;
7781 else if (cpu_unit_reservation_p (state, _1bbb_))
7782 return 4;
7783 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
7784 return 3;
7785 else if (cpu_unit_reservation_p (state, _1mfi_))
7786 return 2;
7787 else if (cpu_unit_reservation_p (state, _1mfb_))
7788 return 8;
7789 else if (cpu_unit_reservation_p (state, _1mlx_))
7790 return 9;
7791 else
7792 gcc_unreachable ();
7793 default:
7794 gcc_unreachable ();
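/* A minimal sketch of the template codes returned by get_template above.
   The array below is illustrative only (the "_sketch" name is not used
   anywhere else); the code-to-bundle mapping follows from the reservation
   checks above and from the bundle_selector uses later in this file,
   e.g. 0 = MII, 2 = MFI, 9 = MLX.  */
#if 0
static const char *const bundle_name_sketch[10] =
{
  "MII", "MMI", "MFI", "MMF", "BBB", "MBB", "MIB", "MMB", "MFB", "MLX"
};
#endif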
7798 /* The following function returns the first insn important for insn
7799 bundling, searching from INSN up to (but not including) TAIL. */
7801 static rtx
7802 get_next_important_insn (rtx insn, rtx tail)
7804 for (; insn && insn != tail; insn = NEXT_INSN (insn))
7805 if (INSN_P (insn)
7806 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7807 && GET_CODE (PATTERN (insn)) != USE
7808 && GET_CODE (PATTERN (insn)) != CLOBBER)
7809 return insn;
7810 return NULL_RTX;
7813 /* Add a bundle selector TEMPLATE0 before INSN. */
7815 static void
7816 ia64_add_bundle_selector_before (int template0, rtx insn)
7818 rtx b = gen_bundle_selector (GEN_INT (template0));
7820 ia64_emit_insn_before (b, insn);
7821 #if NR_BUNDLES == 10
7822 if ((template0 == 4 || template0 == 5)
7823 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
7825 int i;
7826 rtx note = NULL_RTX;
7828 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
7829 first or second slot.  If it is and has a REG_EH_REGION note, copy
7830 the note to the following nops, as br.call sets rp to the address of
7831 the following bundle and therefore an EH region end must be on a
7832 bundle boundary. */
7833 insn = PREV_INSN (insn);
7834 for (i = 0; i < 3; i++)
7837 insn = next_active_insn (insn);
7838 while (GET_CODE (insn) == INSN
7839 && get_attr_empty (insn) == EMPTY_YES);
7840 if (GET_CODE (insn) == CALL_INSN)
7841 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
7842 else if (note)
7844 int code;
7846 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
7847 || code == CODE_FOR_nop_b);
7848 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
7849 note = NULL_RTX;
7850 else
7851 REG_NOTES (insn)
7852 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
7853 REG_NOTES (insn));
7857 #endif
7860 /* The following function does insn bundling.  Bundling means
7861 inserting templates and nop insns to fit insn groups into permitted
7862 templates.  Instruction scheduling uses an NDFA (non-deterministic
7863 finite automaton) encoding information about the templates and the
7864 inserted nops.  The nondeterminism of the automaton permits following
7865 all possible insn sequences very fast.
7867 Unfortunately it is not possible to get information about the inserted
7868 nop insns and the used templates from the automaton states.  The
7869 automaton only says that we can issue an insn, possibly inserting
7870 some nops before it and using some template.  Therefore insn
7871 bundling in this function is implemented by using a DFA
7872 (deterministic finite automaton).  We follow all possible insn
7873 sequences by inserting 0-2 nops (that is what the NDFA describes for
7874 insn scheduling) before/after each insn being bundled.  We know the
7875 start of a simulated processor cycle from insn scheduling (an insn
7876 starting a new cycle has TImode).
7878 A simple implementation of insn bundling would create an enormous
7879 number of possible insn sequences satisfying the information about new
7880 cycle ticks taken from the insn scheduling.  To make the algorithm
7881 practical we use dynamic programming.  Each decision (about
7882 inserting nops and implicitly about previous decisions) is described
7883 by structure bundle_state (see above).  If we generate the same
7884 bundle state (the key is the automaton state after issuing the insns
7885 and nops for it), we reuse the already generated one.  As a
7886 consequence we reject some decisions which cannot improve the solution
7887 and reduce the memory used by the algorithm.
7889 When we reach the end of an EBB (extended basic block), we choose the
7890 best sequence and then, moving back through the EBB, insert templates
7891 for the best alternative.  The templates are obtained by querying the
7892 automaton state for each insn in the chosen bundle states.
7894 So the algorithm makes two (forward and backward) passes through the
7895 EBB.  There is an additional forward pass through the EBB for the
7896 Itanium1 processor.  This pass inserts more nops to make the dependency
7897 between a producer insn and MMMUL/MMSHF at least 4 cycles long.  */
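/* A minimal sketch of the dynamic programming described above, using
   hypothetical simplified types (nothing below is used elsewhere in this
   file).  States reached after the same number of insns are merged when
   their keys match, keeping the better one; that is what keeps the number
   of explored insn sequences manageable.  */
#if 0
struct toy_bundle_state
{
  int dfa_key;		/* stands in for the real automaton state */
  int insns_mod_3;	/* position within the current bundle */
  int cost;		/* e.g. number of inserted nops so far */
};

/* Add NEW_STATE to TABLE, which holds N states for the current insn
   index and has room for one more.  Merge it with an equivalent
   existing state if one is present.  Returns the new number of states
   in TABLE.  */
static int
toy_insert_state (struct toy_bundle_state *table, int n,
		  const struct toy_bundle_state *new_state)
{
  int i;

  for (i = 0; i < n; i++)
    if (table[i].dfa_key == new_state->dfa_key
	&& table[i].insns_mod_3 == new_state->insns_mod_3)
      {
	if (new_state->cost < table[i].cost)
	  table[i] = *new_state;	/* keep the cheaper decision */
	return n;			/* merged, nothing new to explore */
      }
  table[n] = *new_state;		/* genuinely new state */
  return n + 1;
}
#endif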
7899 static void
7900 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7902 struct bundle_state *curr_state, *next_state, *best_state;
7903 rtx insn, next_insn;
7904 int insn_num;
7905 int i, bundle_end_p, only_bundle_end_p, asm_p;
7906 int pos = 0, max_pos, template0, template1;
7907 rtx b;
7908 rtx nop;
7909 enum attr_type type;
7911 insn_num = 0;
7912 /* Count insns in the EBB. */
7913 for (insn = NEXT_INSN (prev_head_insn);
7914 insn && insn != tail;
7915 insn = NEXT_INSN (insn))
7916 if (INSN_P (insn))
7917 insn_num++;
7918 if (insn_num == 0)
7919 return;
7920 bundling_p = 1;
7921 dfa_clean_insn_cache ();
7922 initiate_bundle_state_table ();
7923 index_to_bundle_states = xmalloc ((insn_num + 2)
7924 * sizeof (struct bundle_state *));
7925 /* First (forward) pass -- generation of bundle states. */
7926 curr_state = get_free_bundle_state ();
7927 curr_state->insn = NULL;
7928 curr_state->before_nops_num = 0;
7929 curr_state->after_nops_num = 0;
7930 curr_state->insn_num = 0;
7931 curr_state->cost = 0;
7932 curr_state->accumulated_insns_num = 0;
7933 curr_state->branch_deviation = 0;
7934 curr_state->next = NULL;
7935 curr_state->originator = NULL;
7936 state_reset (curr_state->dfa_state);
7937 index_to_bundle_states [0] = curr_state;
7938 insn_num = 0;
7939 /* Shift cycle mark if it is put on insn which could be ignored. */
7940 for (insn = NEXT_INSN (prev_head_insn);
7941 insn != tail;
7942 insn = NEXT_INSN (insn))
7943 if (INSN_P (insn)
7944 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7945 || GET_CODE (PATTERN (insn)) == USE
7946 || GET_CODE (PATTERN (insn)) == CLOBBER)
7947 && GET_MODE (insn) == TImode)
7949 PUT_MODE (insn, VOIDmode);
7950 for (next_insn = NEXT_INSN (insn);
7951 next_insn != tail;
7952 next_insn = NEXT_INSN (next_insn))
7953 if (INSN_P (next_insn)
7954 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7955 && GET_CODE (PATTERN (next_insn)) != USE
7956 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7958 PUT_MODE (next_insn, TImode);
7959 break;
7962 /* Forward pass: generation of bundle states. */
7963 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7964 insn != NULL_RTX;
7965 insn = next_insn)
7967 gcc_assert (INSN_P (insn)
7968 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7969 && GET_CODE (PATTERN (insn)) != USE
7970 && GET_CODE (PATTERN (insn)) != CLOBBER);
7971 type = ia64_safe_type (insn);
7972 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7973 insn_num++;
7974 index_to_bundle_states [insn_num] = NULL;
7975 for (curr_state = index_to_bundle_states [insn_num - 1];
7976 curr_state != NULL;
7977 curr_state = next_state)
7979 pos = curr_state->accumulated_insns_num % 3;
7980 next_state = curr_state->next;
7981 /* We must fill up the current bundle in order to start a
7982 subsequent asm insn in a new bundle.  An asm insn is always
7983 placed in a separate bundle. */
7984 only_bundle_end_p
7985 = (next_insn != NULL_RTX
7986 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7987 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7988 /* We may fill up the current bundle if it is the cycle end
7989 without a group barrier. */
7990 bundle_end_p
7991 = (only_bundle_end_p || next_insn == NULL_RTX
7992 || (GET_MODE (next_insn) == TImode
7993 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7994 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7995 || type == TYPE_S
7996 /* We need to insert 2 nops for cases like M_MII. To
7997 guarantee issuing all insns on the same cycle for
7998 Itanium 1, we need to issue 2 nops after the first M
7999 insn (MnnMII where n is a nop insn). */
8000 || ((type == TYPE_M || type == TYPE_A)
8001 && ia64_tune == PROCESSOR_ITANIUM
8002 && !bundle_end_p && pos == 1))
8003 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8004 only_bundle_end_p);
8005 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8006 only_bundle_end_p);
8007 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8008 only_bundle_end_p);
8010 gcc_assert (index_to_bundle_states [insn_num]);
8011 for (curr_state = index_to_bundle_states [insn_num];
8012 curr_state != NULL;
8013 curr_state = curr_state->next)
8014 if (verbose >= 2 && dump)
8016 /* This structure is taken from generated code of the
8017 pipeline hazard recognizer (see file insn-attrtab.c).
8018 Please don't forget to change the structure if a new
8019 automaton is added to .md file. */
8020 struct DFA_chip
8022 unsigned short one_automaton_state;
8023 unsigned short oneb_automaton_state;
8024 unsigned short two_automaton_state;
8025 unsigned short twob_automaton_state;
8028 fprintf
8029 (dump,
8030 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8031 curr_state->unique_num,
8032 (curr_state->originator == NULL
8033 ? -1 : curr_state->originator->unique_num),
8034 curr_state->cost,
8035 curr_state->before_nops_num, curr_state->after_nops_num,
8036 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8037 (ia64_tune == PROCESSOR_ITANIUM
8038 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8039 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8040 INSN_UID (insn));
8044 /* We should find a solution because the 2nd insn scheduling has
8045 found one. */
8046 gcc_assert (index_to_bundle_states [insn_num]);
8047 /* Find a state corresponding to the best insn sequence. */
8048 best_state = NULL;
8049 for (curr_state = index_to_bundle_states [insn_num];
8050 curr_state != NULL;
8051 curr_state = curr_state->next)
8052 /* We consider only the states whose last bundle is completely
8053 filled.  Among those, we prefer insn sequences with minimal cost,
8054 then with the fewest inserted nops, and finally with branch insns
8055 placed in the 3rd slots. */
8056 if (curr_state->accumulated_insns_num % 3 == 0
8057 && (best_state == NULL || best_state->cost > curr_state->cost
8058 || (best_state->cost == curr_state->cost
8059 && (curr_state->accumulated_insns_num
8060 < best_state->accumulated_insns_num
8061 || (curr_state->accumulated_insns_num
8062 == best_state->accumulated_insns_num
8063 && curr_state->branch_deviation
8064 < best_state->branch_deviation)))))
8065 best_state = curr_state;
8066 /* Second (backward) pass: adding nops and templates. */
8067 insn_num = best_state->before_nops_num;
8068 template0 = template1 = -1;
8069 for (curr_state = best_state;
8070 curr_state->originator != NULL;
8071 curr_state = curr_state->originator)
8073 insn = curr_state->insn;
8074 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8075 || asm_noperands (PATTERN (insn)) >= 0);
8076 insn_num++;
8077 if (verbose >= 2 && dump)
8079 struct DFA_chip
8081 unsigned short one_automaton_state;
8082 unsigned short oneb_automaton_state;
8083 unsigned short two_automaton_state;
8084 unsigned short twob_automaton_state;
8087 fprintf
8088 (dump,
8089 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8090 curr_state->unique_num,
8091 (curr_state->originator == NULL
8092 ? -1 : curr_state->originator->unique_num),
8093 curr_state->cost,
8094 curr_state->before_nops_num, curr_state->after_nops_num,
8095 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8096 (ia64_tune == PROCESSOR_ITANIUM
8097 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8098 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8099 INSN_UID (insn));
8101 /* Find the position in the current bundle window.  The window can
8102 contain at most two bundles.  A two-bundle window means that
8103 the processor will make two bundle rotations. */
8104 max_pos = get_max_pos (curr_state->dfa_state);
8105 if (max_pos == 6
8106 /* The following (negative template number) means that the
8107 processor did one bundle rotation. */
8108 || (max_pos == 3 && template0 < 0))
8110 /* We are at the end of the window -- find template(s) for
8111 its bundle(s). */
8112 pos = max_pos;
8113 if (max_pos == 3)
8114 template0 = get_template (curr_state->dfa_state, 3);
8115 else
8117 template1 = get_template (curr_state->dfa_state, 3);
8118 template0 = get_template (curr_state->dfa_state, 6);
8121 if (max_pos > 3 && template1 < 0)
8122 /* It may happen when we have the stop inside a bundle. */
8124 gcc_assert (pos <= 3);
8125 template1 = get_template (curr_state->dfa_state, 3);
8126 pos += 3;
8128 if (!asm_p)
8129 /* Emit nops after the current insn. */
8130 for (i = 0; i < curr_state->after_nops_num; i++)
8132 nop = gen_nop ();
8133 emit_insn_after (nop, insn);
8134 pos--;
8135 gcc_assert (pos >= 0);
8136 if (pos % 3 == 0)
8138 /* We are at the start of a bundle: emit the template
8139 (it should be defined). */
8140 gcc_assert (template0 >= 0);
8141 ia64_add_bundle_selector_before (template0, nop);
8142 /* If we have a two-bundle window, we make one bundle
8143 rotation.  Otherwise template0 will be undefined
8144 (a negative value). */
8145 template0 = template1;
8146 template1 = -1;
8149 /* Move the position backward in the window.  A group barrier has
8150 no slot.  An asm insn takes a whole bundle. */
8151 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8152 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8153 && asm_noperands (PATTERN (insn)) < 0)
8154 pos--;
8155 /* Long insn takes 2 slots. */
8156 if (ia64_safe_type (insn) == TYPE_L)
8157 pos--;
8158 gcc_assert (pos >= 0);
8159 if (pos % 3 == 0
8160 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8161 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8162 && asm_noperands (PATTERN (insn)) < 0)
8164 /* The current insn is at the bundle start: emit the
8165 template. */
8166 gcc_assert (template0 >= 0);
8167 ia64_add_bundle_selector_before (template0, insn);
8168 b = PREV_INSN (insn);
8169 insn = b;
8170 /* See comment above in analogous place for emitting nops
8171 after the insn. */
8172 template0 = template1;
8173 template1 = -1;
8175 /* Emit nops before the current insn. */
8176 for (i = 0; i < curr_state->before_nops_num; i++)
8178 nop = gen_nop ();
8179 ia64_emit_insn_before (nop, insn);
8180 nop = PREV_INSN (insn);
8181 insn = nop;
8182 pos--;
8183 gcc_assert (pos >= 0);
8184 if (pos % 3 == 0)
8186 /* See comment above in analogous place for emitting nops
8187 after the insn. */
8188 gcc_assert (template0 >= 0);
8189 ia64_add_bundle_selector_before (template0, insn);
8190 b = PREV_INSN (insn);
8191 insn = b;
8192 template0 = template1;
8193 template1 = -1;
8197 if (ia64_tune == PROCESSOR_ITANIUM)
8198 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8199 Itanium1 has a strange design: if the distance between an insn
8200 and a dependent MM-insn is less than 4 cycles, we get an additional
8201 6-cycle stall.  So we make the distance equal to 4 cycles if it
8202 is less. */
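/* (In other words: padding the schedule so that the producer and the
   MM-insn are 4 cycles apart costs at most 3 cycles of nops, which is
   cheaper than the 6-cycle stall described above.)  */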
8203 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8204 insn != NULL_RTX;
8205 insn = next_insn)
8207 gcc_assert (INSN_P (insn)
8208 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8209 && GET_CODE (PATTERN (insn)) != USE
8210 && GET_CODE (PATTERN (insn)) != CLOBBER);
8211 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8212 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8213 /* We found a MM-insn which needs additional cycles. */
8215 rtx last;
8216 int i, j, n;
8217 int pred_stop_p;
8219 /* Now we are searching for the template of the bundle in
8220 which the MM-insn is placed and for the position of the
8221 insn in the bundle (0, 1, 2).  We also check whether there
8222 is a stop before the insn. */
8223 last = prev_active_insn (insn);
8224 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8225 if (pred_stop_p)
8226 last = prev_active_insn (last);
8227 n = 0;
8228 for (;; last = prev_active_insn (last))
8229 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8231 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8232 if (template0 == 9)
8233 /* The insn is in an MLX bundle.  Change the template
8234 to MFI because we will add nops before the
8235 insn.  It simplifies subsequent code a lot. */
8236 PATTERN (last)
8237 = gen_bundle_selector (const2_rtx); /* -> MFI */
8238 break;
8240 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8241 && (ia64_safe_itanium_class (last)
8242 != ITANIUM_CLASS_IGNORE))
8243 n++;
8244 /* Some correctness checks: the stop is not at the
8245 bundle start, there are no more than 3 insns in the bundle,
8246 and the MM-insn is not at the start of a bundle with
8247 template MLX. */
8248 gcc_assert ((!pred_stop_p || n)
8249 && n <= 2
8250 && (template0 != 9 || !n));
8251 /* Put nops after the insn in the bundle. */
8252 for (j = 3 - n; j > 0; j --)
8253 ia64_emit_insn_before (gen_nop (), insn);
8254 /* This takes into account that we will add N more nops
8255 before the insn later -- please see the code below. */
8256 add_cycles [INSN_UID (insn)]--;
8257 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8258 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8259 insn);
8260 if (pred_stop_p)
8261 add_cycles [INSN_UID (insn)]--;
8262 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8264 /* Insert "MII;" template. */
8265 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8266 insn);
8267 ia64_emit_insn_before (gen_nop (), insn);
8268 ia64_emit_insn_before (gen_nop (), insn);
8269 if (i > 1)
8271 /* To decrease code size, we use "MI;I;"
8272 template. */
8273 ia64_emit_insn_before
8274 (gen_insn_group_barrier (GEN_INT (3)), insn);
8275 i--;
8277 ia64_emit_insn_before (gen_nop (), insn);
8278 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8279 insn);
8281 /* Put the MM-insn in the same slot of a bundle with the
8282 same template as the original one. */
8283 ia64_add_bundle_selector_before (template0, insn);
8284 /* To put the insn in the same slot, add necessary number
8285 of nops. */
8286 for (j = n; j > 0; j --)
8287 ia64_emit_insn_before (gen_nop (), insn);
8288 /* Put the stop if the original bundle had it. */
8289 if (pred_stop_p)
8290 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8291 insn);
8294 free (index_to_bundle_states);
8295 finish_bundle_state_table ();
8296 bundling_p = 0;
8297 dfa_clean_insn_cache ();
8300 /* The following function is called at the end of scheduling BB or
8301 EBB. After reload, it inserts stop bits and does insn bundling. */
8303 static void
8304 ia64_sched_finish (FILE *dump, int sched_verbose)
8306 if (sched_verbose)
8307 fprintf (dump, "// Finishing schedule.\n");
8308 if (!reload_completed)
8309 return;
8310 if (reload_completed)
8312 final_emit_insn_group_barriers (dump);
8313 bundling (dump, sched_verbose, current_sched_info->prev_head,
8314 current_sched_info->next_tail);
8315 if (sched_verbose && dump)
8316 fprintf (dump, "// finishing %d-%d\n",
8317 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8318 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8320 return;
8324 /* The following function inserts stop bits in scheduled BB or EBB. */
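/* (Each barrier emitted here corresponds to an architectural stop bit,
   written as ";;" in ia64 assembly output; it marks the end of an
   instruction group.)  */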
8326 static void
8327 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8329 rtx insn;
8330 int need_barrier_p = 0;
8331 rtx prev_insn = NULL_RTX;
8333 init_insn_group_barriers ();
8335 for (insn = NEXT_INSN (current_sched_info->prev_head);
8336 insn != current_sched_info->next_tail;
8337 insn = NEXT_INSN (insn))
8339 if (GET_CODE (insn) == BARRIER)
8341 rtx last = prev_active_insn (insn);
8343 if (! last)
8344 continue;
8345 if (GET_CODE (last) == JUMP_INSN
8346 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8347 last = prev_active_insn (last);
8348 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8349 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8351 init_insn_group_barriers ();
8352 need_barrier_p = 0;
8353 prev_insn = NULL_RTX;
8355 else if (INSN_P (insn))
8357 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8359 init_insn_group_barriers ();
8360 need_barrier_p = 0;
8361 prev_insn = NULL_RTX;
8363 else if (need_barrier_p || group_barrier_needed (insn))
8365 if (TARGET_EARLY_STOP_BITS)
8367 rtx last;
8369 for (last = insn;
8370 last != current_sched_info->prev_head;
8371 last = PREV_INSN (last))
8372 if (INSN_P (last) && GET_MODE (last) == TImode
8373 && stops_p [INSN_UID (last)])
8374 break;
8375 if (last == current_sched_info->prev_head)
8376 last = insn;
8377 last = prev_active_insn (last);
8378 if (last
8379 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8380 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8381 last);
8382 init_insn_group_barriers ();
8383 for (last = NEXT_INSN (last);
8384 last != insn;
8385 last = NEXT_INSN (last))
8386 if (INSN_P (last))
8387 group_barrier_needed (last);
8389 else
8391 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8392 insn);
8393 init_insn_group_barriers ();
8395 group_barrier_needed (insn);
8396 prev_insn = NULL_RTX;
8398 else if (recog_memoized (insn) >= 0)
8399 prev_insn = insn;
8400 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8401 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8402 || asm_noperands (PATTERN (insn)) >= 0);
8409 /* The following function returns the lookahead depth used by the DFA
8410 insn scheduler; a nonzero value enables multipass lookahead. */
8412 static int
8413 ia64_first_cycle_multipass_dfa_lookahead (void)
8415 return (reload_completed ? 6 : 4);
8418 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
8420 static void
8421 ia64_init_dfa_pre_cycle_insn (void)
8423 if (temp_dfa_state == NULL)
8425 dfa_state_size = state_size ();
8426 temp_dfa_state = xmalloc (dfa_state_size);
8427 prev_cycle_state = xmalloc (dfa_state_size);
8429 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8430 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8431 recog_memoized (dfa_pre_cycle_insn);
8432 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8433 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8434 recog_memoized (dfa_stop_insn);
8437 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8438 used by the DFA insn scheduler. */
8440 static rtx
8441 ia64_dfa_pre_cycle_insn (void)
8443 return dfa_pre_cycle_insn;
8446 /* The following function returns TRUE if PRODUCER (of type ilog or
8447 ld) produces an address for CONSUMER (of type st or stf). */
8450 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8452 rtx dest, reg, mem;
8454 gcc_assert (producer && consumer);
8455 dest = ia64_single_set (producer);
8456 gcc_assert (dest);
8457 reg = SET_DEST (dest);
8458 gcc_assert (reg);
8459 if (GET_CODE (reg) == SUBREG)
8460 reg = SUBREG_REG (reg);
8461 gcc_assert (GET_CODE (reg) == REG);
8463 dest = ia64_single_set (consumer);
8464 gcc_assert (dest);
8465 mem = SET_DEST (dest);
8466 gcc_assert (mem && GET_CODE (mem) == MEM);
8467 return reg_mentioned_p (reg, mem);
8470 /* The following function returns TRUE if PRODUCER (of type ilog or
8471 ld) produces an address for CONSUMER (of type ld or fld). */
8474 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8476 rtx dest, src, reg, mem;
8478 gcc_assert (producer && consumer);
8479 dest = ia64_single_set (producer);
8480 gcc_assert (dest);
8481 reg = SET_DEST (dest);
8482 gcc_assert (reg);
8483 if (GET_CODE (reg) == SUBREG)
8484 reg = SUBREG_REG (reg);
8485 gcc_assert (GET_CODE (reg) == REG);
8487 src = ia64_single_set (consumer);
8488 gcc_assert (src);
8489 mem = SET_SRC (src);
8490 gcc_assert (mem);
8492 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
8493 mem = XVECEXP (mem, 0, 0);
8494 else if (GET_CODE (mem) == IF_THEN_ELSE)
8495 /* ??? Is this bypass necessary for ld.c? */
8497 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
8498 mem = XEXP (mem, 1);
8501 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
8502 mem = XEXP (mem, 0);
8504 if (GET_CODE (mem) == UNSPEC)
8506 int c = XINT (mem, 1);
8508 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDSA);
8509 mem = XVECEXP (mem, 0, 0);
8512 /* Note that LO_SUM is used for GOT loads. */
8513 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
8515 return reg_mentioned_p (reg, mem);
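/* For example, for a producer "r14 = r15 + 8" (ilog) and a consumer
   "ld8 r16 = [r14]", the functions above report a bypass because the
   register set by the producer (r14) is mentioned in the address of the
   consumer's memory operand.  (Illustration only; the register numbers
   are arbitrary.)  */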
8518 /* The following function returns TRUE if INSN produces an address
8519 for a load/store insn.  We will place such insns into an M slot
8520 because that decreases their latency. */
8523 ia64_produce_address_p (rtx insn)
8525 return insn->call;
8529 /* Emit pseudo-ops for the assembler to describe predicate relations.
8530 At present this assumes that we only consider predicate pairs to
8531 be mutex, and that the assembler can deduce proper values from
8532 straight-line code. */
8534 static void
8535 emit_predicate_relation_info (void)
8537 basic_block bb;
8539 FOR_EACH_BB_REVERSE (bb)
8541 int r;
8542 rtx head = BB_HEAD (bb);
8544 /* We only need such notes at code labels. */
8545 if (GET_CODE (head) != CODE_LABEL)
8546 continue;
8547 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
8548 head = NEXT_INSN (head);
8550 /* Skip p0, which may be thought to be live due to (reg:DI p0)
8551 grabbing the entire block of predicate registers. */
8552 for (r = PR_REG (2); r < PR_REG (64); r += 2)
8553 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
8555 rtx p = gen_rtx_REG (BImode, r);
8556 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
8557 if (head == BB_END (bb))
8558 BB_END (bb) = n;
8559 head = n;
8563 /* Look for conditional calls that do not return, and protect predicate
8564 relations around them. Otherwise the assembler will assume the call
8565 returns, and complain about uses of call-clobbered predicates after
8566 the call. */
8567 FOR_EACH_BB_REVERSE (bb)
8569 rtx insn = BB_HEAD (bb);
8571 while (1)
8573 if (GET_CODE (insn) == CALL_INSN
8574 && GET_CODE (PATTERN (insn)) == COND_EXEC
8575 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
8577 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
8578 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
8579 if (BB_HEAD (bb) == insn)
8580 BB_HEAD (bb) = b;
8581 if (BB_END (bb) == insn)
8582 BB_END (bb) = a;
8585 if (insn == BB_END (bb))
8586 break;
8587 insn = NEXT_INSN (insn);
8592 /* Perform machine dependent operations on the rtl chain INSNS. */
8594 static void
8595 ia64_reorg (void)
8597 /* We are freeing block_for_insn in the toplev to keep compatibility
8598 with old MDEP_REORGS that are not CFG based. Recompute it now. */
8599 compute_bb_for_insn ();
8601 /* If optimizing, we'll have split before scheduling. */
8602 if (optimize == 0)
8603 split_all_insns ();
8605 if (optimize && ia64_flag_schedule_insns2 && dbg_cnt (ia64_sched2))
8607 timevar_push (TV_SCHED2);
8608 ia64_final_schedule = 1;
8610 initiate_bundle_states ();
8611 ia64_nop = make_insn_raw (gen_nop ());
8612 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
8613 recog_memoized (ia64_nop);
8614 clocks_length = get_max_uid () + 1;
8615 stops_p = xcalloc (1, clocks_length);
8616 if (ia64_tune == PROCESSOR_ITANIUM)
8618 clocks = xcalloc (clocks_length, sizeof (int));
8619 add_cycles = xcalloc (clocks_length, sizeof (int));
8621 if (ia64_tune == PROCESSOR_ITANIUM2)
8623 pos_1 = get_cpu_unit_code ("2_1");
8624 pos_2 = get_cpu_unit_code ("2_2");
8625 pos_3 = get_cpu_unit_code ("2_3");
8626 pos_4 = get_cpu_unit_code ("2_4");
8627 pos_5 = get_cpu_unit_code ("2_5");
8628 pos_6 = get_cpu_unit_code ("2_6");
8629 _0mii_ = get_cpu_unit_code ("2b_0mii.");
8630 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
8631 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
8632 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
8633 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
8634 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
8635 _0mib_ = get_cpu_unit_code ("2b_0mib.");
8636 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
8637 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
8638 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
8639 _1mii_ = get_cpu_unit_code ("2b_1mii.");
8640 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
8641 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
8642 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
8643 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
8644 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
8645 _1mib_ = get_cpu_unit_code ("2b_1mib.");
8646 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
8647 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
8648 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
8650 else
8652 pos_1 = get_cpu_unit_code ("1_1");
8653 pos_2 = get_cpu_unit_code ("1_2");
8654 pos_3 = get_cpu_unit_code ("1_3");
8655 pos_4 = get_cpu_unit_code ("1_4");
8656 pos_5 = get_cpu_unit_code ("1_5");
8657 pos_6 = get_cpu_unit_code ("1_6");
8658 _0mii_ = get_cpu_unit_code ("1b_0mii.");
8659 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
8660 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
8661 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
8662 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
8663 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
8664 _0mib_ = get_cpu_unit_code ("1b_0mib.");
8665 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
8666 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
8667 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
8668 _1mii_ = get_cpu_unit_code ("1b_1mii.");
8669 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
8670 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
8671 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
8672 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
8673 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
8674 _1mib_ = get_cpu_unit_code ("1b_1mib.");
8675 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
8676 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
8677 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
8679 schedule_ebbs ();
8680 /* We cannot reuse this one because it has been corrupted by the
8681 evil glat. */
8682 finish_bundle_states ();
8683 if (ia64_tune == PROCESSOR_ITANIUM)
8685 free (add_cycles);
8686 free (clocks);
8688 free (stops_p);
8689 stops_p = NULL;
8690 emit_insn_group_barriers (dump_file);
8692 ia64_final_schedule = 0;
8693 timevar_pop (TV_SCHED2);
8695 else
8696 emit_all_insn_group_barriers (dump_file);
8698 df_analyze ();
8700 /* A call must not be the last instruction in a function, so that the
8701 return address is still within the function, so that unwinding works
8702 properly. Note that IA-64 differs from dwarf2 on this point. */
8703 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8705 rtx insn;
8706 int saw_stop = 0;
8708 insn = get_last_insn ();
8709 if (! INSN_P (insn))
8710 insn = prev_active_insn (insn);
8711 /* Skip over insns that expand to nothing. */
8712 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
8714 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
8715 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
8716 saw_stop = 1;
8717 insn = prev_active_insn (insn);
8719 if (GET_CODE (insn) == CALL_INSN)
8721 if (! saw_stop)
8722 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8723 emit_insn (gen_break_f ());
8724 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8728 emit_predicate_relation_info ();
8730 if (ia64_flag_var_tracking)
8732 timevar_push (TV_VAR_TRACKING);
8733 variable_tracking_main ();
8734 timevar_pop (TV_VAR_TRACKING);
8736 df_finish_pass (false);
8739 /* Return true if REGNO is used by the epilogue. */
8742 ia64_epilogue_uses (int regno)
8744 switch (regno)
8746 case R_GR (1):
8747 /* With a call to a function in another module, we will write a new
8748 value to "gp". After returning from such a call, we need to make
8749 sure the function restores the original gp-value, even if the
8750 function itself does not use the gp anymore. */
8751 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
8753 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
8754 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
8755 /* For functions defined with the syscall_linkage attribute, all
8756 input registers are marked as live at all function exits. This
8757 prevents the register allocator from using the input registers,
8758 which in turn makes it possible to restart a system call after
8759 an interrupt without having to save/restore the input registers.
8760 This also prevents kernel data from leaking to application code. */
8761 return lookup_attribute ("syscall_linkage",
8762 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
8764 case R_BR (0):
8765 /* Conditional return patterns can't represent the use of `b0' as
8766 the return address, so we force the value live this way. */
8767 return 1;
8769 case AR_PFS_REGNUM:
8770 /* Likewise for ar.pfs, which is used by br.ret. */
8771 return 1;
8773 default:
8774 return 0;
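/* A minimal usage sketch of the attribute tested above (user-level code,
   not part of this file; the function name is made up):

     extern long sys_stub (long, long) __attribute__ ((syscall_linkage));

   With this attribute, all eight input registers are treated as live at
   every exit of the function, as described in the comment above.  */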
8778 /* Return true if REGNO is used by the frame unwinder. */
8781 ia64_eh_uses (int regno)
8783 enum ia64_frame_regs r;
8785 if (! reload_completed)
8786 return 0;
8788 if (regno == 0)
8789 return 0;
8791 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
8792 if (regno == current_frame_info.r[r]
8793 || regno == emitted_frame_related_regs[r])
8794 return 1;
8796 return 0;
8799 /* Return true if this goes in small data/bss. */
8801 /* ??? We could also support own long data here. Generating movl/add/ld8
8802 instead of addl,ld8/ld8. This makes the code bigger, but should make the
8803 code faster because there is one less load. This also includes incomplete
8804 types which can't go in sdata/sbss. */
8806 static bool
8807 ia64_in_small_data_p (const_tree exp)
8809 if (TARGET_NO_SDATA)
8810 return false;
8812 /* We want to merge strings, so we never consider them small data. */
8813 if (TREE_CODE (exp) == STRING_CST)
8814 return false;
8816 /* Functions are never small data. */
8817 if (TREE_CODE (exp) == FUNCTION_DECL)
8818 return false;
8820 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
8822 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
8824 if (strcmp (section, ".sdata") == 0
8825 || strncmp (section, ".sdata.", 7) == 0
8826 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
8827 || strcmp (section, ".sbss") == 0
8828 || strncmp (section, ".sbss.", 6) == 0
8829 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
8830 return true;
8832 else
8834 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
8836 /* If this is an incomplete type with size 0, then we can't put it
8837 in sdata because it might be too big when completed. */
8838 if (size > 0 && size <= ia64_section_threshold)
8839 return true;
8842 return false;
8845 /* Output assembly directives for prologue regions. */
8847 /* The current basic block number. */
8849 static bool last_block;
8851 /* True if we need a copy_state command at the start of the next block. */
8853 static bool need_copy_state;
8855 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8856 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8857 #endif
8859 /* Emit a debugging label after a call-frame-related insn. We'd
8860 rather output the label right away, but we'd have to output it
8861 after, not before, the instruction, and the instruction has not
8862 been output yet. So we emit the label after the insn, delete it to
8863 avoid introducing basic blocks, and mark it as preserved, such that
8864 it is still output, given that it is referenced in debug info. */
8866 static const char *
8867 ia64_emit_deleted_label_after_insn (rtx insn)
8869 char label[MAX_ARTIFICIAL_LABEL_BYTES];
8870 rtx lb = gen_label_rtx ();
8871 rtx label_insn = emit_label_after (lb, insn);
8873 LABEL_PRESERVE_P (lb) = 1;
8875 delete_insn (label_insn);
8877 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
8879 return xstrdup (label);
8882 /* Define the CFA after INSN with the steady-state definition. */
8884 static void
8885 ia64_dwarf2out_def_steady_cfa (rtx insn)
8887 rtx fp = frame_pointer_needed
8888 ? hard_frame_pointer_rtx
8889 : stack_pointer_rtx;
8891 dwarf2out_def_cfa
8892 (ia64_emit_deleted_label_after_insn (insn),
8893 REGNO (fp),
8894 ia64_initial_elimination_offset
8895 (REGNO (arg_pointer_rtx), REGNO (fp))
8896 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8899 /* The generic dwarf2 frame debug info generator does not define a
8900 separate region for the very end of the epilogue, so refrain from
8901 doing so in the IA64-specific code as well. */
8903 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8905 /* The function emits unwind directives for the start of an epilogue. */
8907 static void
8908 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8910 /* If this isn't the last block of the function, then we need to label the
8911 current state, and copy it back in at the start of the next block. */
8913 if (!last_block)
8915 if (unwind)
8916 fprintf (asm_out_file, "\t.label_state %d\n",
8917 ++cfun->machine->state_num);
8918 need_copy_state = true;
8921 if (unwind)
8922 fprintf (asm_out_file, "\t.restore sp\n");
8923 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8924 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8925 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
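/* For a mid-function epilogue, the directives emitted above and in
   process_for_unwind_directive combine roughly as follows (illustrative
   assembly; the label number is arbitrary):

	.label_state 1
	.restore sp
	...			-- the epilogue and the return
	.body
	.copy_state 1

   so the unwinder sees the pre-epilogue state again at the start of the
   next block.  */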
8928 /* This function processes a SET pattern looking for specific patterns
8929 which result in emitting an assembly directive required for unwinding. */
8931 static int
8932 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8934 rtx src = SET_SRC (pat);
8935 rtx dest = SET_DEST (pat);
8936 int src_regno, dest_regno;
8938 /* Look for the ALLOC insn. */
8939 if (GET_CODE (src) == UNSPEC_VOLATILE
8940 && XINT (src, 1) == UNSPECV_ALLOC
8941 && GET_CODE (dest) == REG)
8943 dest_regno = REGNO (dest);
8945 /* If this is the final destination for ar.pfs, then this must
8946 be the alloc in the prologue. */
8947 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
8949 if (unwind)
8950 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8951 ia64_dbx_register_number (dest_regno));
8953 else
8955 /* This must be an alloc before a sibcall. We must drop the
8956 old frame info. The easiest way to drop the old frame
8957 info is to ensure we had a ".restore sp" directive
8958 followed by a new prologue. If the procedure doesn't
8959 have a memory-stack frame, we'll issue a dummy ".restore
8960 sp" now. */
8961 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8962 /* If we haven't done process_epilogue () yet, do it now. */
8963 process_epilogue (asm_out_file, insn, unwind, frame);
8964 if (unwind)
8965 fprintf (asm_out_file, "\t.prologue\n");
8967 return 1;
8970 /* Look for SP = .... */
8971 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8973 if (GET_CODE (src) == PLUS)
8975 rtx op0 = XEXP (src, 0);
8976 rtx op1 = XEXP (src, 1);
8978 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8980 if (INTVAL (op1) < 0)
8982 gcc_assert (!frame_pointer_needed);
8983 if (unwind)
8984 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8985 -INTVAL (op1));
8986 if (frame)
8987 ia64_dwarf2out_def_steady_cfa (insn);
8989 else
8990 process_epilogue (asm_out_file, insn, unwind, frame);
8992 else
8994 gcc_assert (GET_CODE (src) == REG
8995 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8996 process_epilogue (asm_out_file, insn, unwind, frame);
8999 return 1;
9002 /* Register move we need to look at. */
9003 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
9005 src_regno = REGNO (src);
9006 dest_regno = REGNO (dest);
9008 switch (src_regno)
9010 case BR_REG (0):
9011 /* Saving return address pointer. */
9012 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9013 if (unwind)
9014 fprintf (asm_out_file, "\t.save rp, r%d\n",
9015 ia64_dbx_register_number (dest_regno));
9016 return 1;
9018 case PR_REG (0):
9019 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9020 if (unwind)
9021 fprintf (asm_out_file, "\t.save pr, r%d\n",
9022 ia64_dbx_register_number (dest_regno));
9023 return 1;
9025 case AR_UNAT_REGNUM:
9026 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9027 if (unwind)
9028 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9029 ia64_dbx_register_number (dest_regno));
9030 return 1;
9032 case AR_LC_REGNUM:
9033 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9034 if (unwind)
9035 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9036 ia64_dbx_register_number (dest_regno));
9037 return 1;
9039 case STACK_POINTER_REGNUM:
9040 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
9041 && frame_pointer_needed);
9042 if (unwind)
9043 fprintf (asm_out_file, "\t.vframe r%d\n",
9044 ia64_dbx_register_number (dest_regno));
9045 if (frame)
9046 ia64_dwarf2out_def_steady_cfa (insn);
9047 return 1;
9049 default:
9050 /* Everything else should indicate being stored to memory. */
9051 gcc_unreachable ();
9055 /* Memory store we need to look at. */
9056 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
9058 long off;
9059 rtx base;
9060 const char *saveop;
9062 if (GET_CODE (XEXP (dest, 0)) == REG)
9064 base = XEXP (dest, 0);
9065 off = 0;
9067 else
9069 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9070 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9071 base = XEXP (XEXP (dest, 0), 0);
9072 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9075 if (base == hard_frame_pointer_rtx)
9077 saveop = ".savepsp";
9078 off = - off;
9080 else
9082 gcc_assert (base == stack_pointer_rtx);
9083 saveop = ".savesp";
9086 src_regno = REGNO (src);
9087 switch (src_regno)
9089 case BR_REG (0):
9090 gcc_assert (!current_frame_info.r[reg_save_b0]);
9091 if (unwind)
9092 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
9093 return 1;
9095 case PR_REG (0):
9096 gcc_assert (!current_frame_info.r[reg_save_pr]);
9097 if (unwind)
9098 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
9099 return 1;
9101 case AR_LC_REGNUM:
9102 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9103 if (unwind)
9104 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
9105 return 1;
9107 case AR_PFS_REGNUM:
9108 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9109 if (unwind)
9110 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9111 return 1;
9113 case AR_UNAT_REGNUM:
9114 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9115 if (unwind)
9116 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9117 return 1;
9119 case GR_REG (4):
9120 case GR_REG (5):
9121 case GR_REG (6):
9122 case GR_REG (7):
9123 if (unwind)
9124 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9125 1 << (src_regno - GR_REG (4)));
9126 return 1;
9128 case BR_REG (1):
9129 case BR_REG (2):
9130 case BR_REG (3):
9131 case BR_REG (4):
9132 case BR_REG (5):
9133 if (unwind)
9134 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9135 1 << (src_regno - BR_REG (1)));
9136 return 1;
9138 case FR_REG (2):
9139 case FR_REG (3):
9140 case FR_REG (4):
9141 case FR_REG (5):
9142 if (unwind)
9143 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9144 1 << (src_regno - FR_REG (2)));
9145 return 1;
9147 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9148 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9149 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9150 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9151 if (unwind)
9152 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9153 1 << (src_regno - FR_REG (12)));
9154 return 1;
9156 default:
9157 return 0;
9161 return 0;
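/* Illustrative examples of the directives emitted by the code above
   (register numbers and offsets are arbitrary):

	.save ar.pfs, r34	-- alloc writing the save register
	.fframe 160		-- sp decrement in the prologue
	.restore sp		-- sp restore in the epilogue
	.vframe r33		-- frame pointer setup
	.savesp rp, 16		-- b0 spilled to a stack slot  */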
9165 /* This function looks at a single insn and emits any directives
9166 required to unwind this insn. */
9167 void
9168 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9170 bool unwind = (flag_unwind_tables
9171 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9172 bool frame = dwarf2out_do_frame ();
9174 if (unwind || frame)
9176 rtx pat;
9178 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9180 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9182 /* Restore unwind state from immediately before the epilogue. */
9183 if (need_copy_state)
9185 if (unwind)
9187 fprintf (asm_out_file, "\t.body\n");
9188 fprintf (asm_out_file, "\t.copy_state %d\n",
9189 cfun->machine->state_num);
9191 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9192 ia64_dwarf2out_def_steady_cfa (insn);
9193 need_copy_state = false;
9197 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9198 return;
9200 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9201 if (pat)
9202 pat = XEXP (pat, 0);
9203 else
9204 pat = PATTERN (insn);
9206 switch (GET_CODE (pat))
9208 case SET:
9209 process_set (asm_out_file, pat, insn, unwind, frame);
9210 break;
9212 case PARALLEL:
9214 int par_index;
9215 int limit = XVECLEN (pat, 0);
9216 for (par_index = 0; par_index < limit; par_index++)
9218 rtx x = XVECEXP (pat, 0, par_index);
9219 if (GET_CODE (x) == SET)
9220 process_set (asm_out_file, x, insn, unwind, frame);
9222 break;
9225 default:
9226 gcc_unreachable ();
9232 enum ia64_builtins
9234 IA64_BUILTIN_BSP,
9235 IA64_BUILTIN_FLUSHRS
9238 void
9239 ia64_init_builtins (void)
9241 tree fpreg_type;
9242 tree float80_type;
9244 /* The __fpreg type. */
9245 fpreg_type = make_node (REAL_TYPE);
9246 TYPE_PRECISION (fpreg_type) = 82;
9247 layout_type (fpreg_type);
9248 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9250 /* The __float80 type. */
9251 float80_type = make_node (REAL_TYPE);
9252 TYPE_PRECISION (float80_type) = 80;
9253 layout_type (float80_type);
9254 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9256 /* The __float128 type. */
9257 if (!TARGET_HPUX)
9259 tree float128_type = make_node (REAL_TYPE);
9260 TYPE_PRECISION (float128_type) = 128;
9261 layout_type (float128_type);
9262 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9264 else
9265 /* Under HPUX, this is a synonym for "long double". */
9266 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9267 "__float128");
9269 #define def_builtin(name, type, code) \
9270 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9271 NULL, NULL_TREE)
9273 def_builtin ("__builtin_ia64_bsp",
9274 build_function_type (ptr_type_node, void_list_node),
9275 IA64_BUILTIN_BSP);
9277 def_builtin ("__builtin_ia64_flushrs",
9278 build_function_type (void_type_node, void_list_node),
9279 IA64_BUILTIN_FLUSHRS);
9281 #undef def_builtin
9283 if (TARGET_HPUX)
9285 if (built_in_decls [BUILT_IN_FINITE])
9286 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9287 "_Isfinite");
9288 if (built_in_decls [BUILT_IN_FINITEF])
9289 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9290 "_Isfinitef");
9291 if (built_in_decls [BUILT_IN_FINITEL])
9292 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9293 "_Isfinitef128");
9298 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9299 enum machine_mode mode ATTRIBUTE_UNUSED,
9300 int ignore ATTRIBUTE_UNUSED)
9302 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9303 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9305 switch (fcode)
9307 case IA64_BUILTIN_BSP:
9308 if (! target || ! register_operand (target, DImode))
9309 target = gen_reg_rtx (DImode);
9310 emit_insn (gen_bsp_value (target));
9311 #ifdef POINTERS_EXTEND_UNSIGNED
9312 target = convert_memory_address (ptr_mode, target);
9313 #endif
9314 return target;
9316 case IA64_BUILTIN_FLUSHRS:
9317 emit_insn (gen_flushrs ());
9318 return const0_rtx;
9320 default:
9321 break;
9324 return NULL_RTX;
9327 /* On HP-UX IA64, aggregate parameters are passed stored in the
9328 most significant bits of the stack slot. */
9330 enum direction
9331 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
9333 /* Exception to normal case for structures/unions/etc. */
9335 if (type && AGGREGATE_TYPE_P (type)
9336 && int_size_in_bytes (type) < UNITS_PER_WORD)
9337 return upward;
9339 /* Fall back to the default. */
9340 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9343 /* Emit text to declare externally defined variables and functions, because
9344 the Intel assembler does not support undefined externals. */
9346 void
9347 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9349 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9350 set in order to avoid putting out names that are never really
9351 used. */
9352 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9354 /* maybe_assemble_visibility will return 1 if the assembler
9355 visibility directive is output. */
9356 int need_visibility = ((*targetm.binds_local_p) (decl)
9357 && maybe_assemble_visibility (decl));
9359 /* GNU as does not need anything here, but the HP linker does
9360 need something for external functions. */
9361 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9362 && TREE_CODE (decl) == FUNCTION_DECL)
9363 (*targetm.asm_out.globalize_decl_name) (file, decl);
9364 else if (need_visibility && !TARGET_GNU_AS)
9365 (*targetm.asm_out.globalize_label) (file, name);
9369 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
9370 modes of word_mode and larger.  Rename the TFmode libfuncs using the
9371 HPUX conventions.  __divtf3 is used for XFmode.  We need to keep it for
9372 backward compatibility. */
9374 static void
9375 ia64_init_libfuncs (void)
9377 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9378 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9379 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9380 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9382 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9383 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9384 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9385 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9386 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9388 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9389 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9390 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9391 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9392 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9393 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9395 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9396 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9397 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9398 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9399 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9401 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9402 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9403 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9404 /* HP-UX 11.23 libc does not have a function for unsigned
9405 SImode-to-TFmode conversion. */
9406 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
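/* Illustration of the effect of the table above: on a target where long
   double is TFmode (e.g. HP-UX), the addition below is emitted as a call
   to _U_Qfadd rather than to the default soft-fp routine.  */
#if 0
long double
quad_add_sketch (long double a, long double b)
{
  return a + b;
}
#endif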
9409 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9411 static void
9412 ia64_hpux_init_libfuncs (void)
9414 ia64_init_libfuncs ();
9416 /* The HP SI millicode division and mod functions expect DI arguments.
9417 By turning them off completely we avoid using both libgcc and the
9418 non-standard millicode routines and use the HP DI millicode routines
9419 instead. */
9421 set_optab_libfunc (sdiv_optab, SImode, 0);
9422 set_optab_libfunc (udiv_optab, SImode, 0);
9423 set_optab_libfunc (smod_optab, SImode, 0);
9424 set_optab_libfunc (umod_optab, SImode, 0);
9426 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9427 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9428 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9429 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
9431 /* HP-UX libc has TF min/max/abs routines in it. */
9432 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
9433 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
9434 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
9436 /* ia64_expand_compare uses this. */
9437 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
9439 /* These should never be used. */
9440 set_optab_libfunc (eq_optab, TFmode, 0);
9441 set_optab_libfunc (ne_optab, TFmode, 0);
9442 set_optab_libfunc (gt_optab, TFmode, 0);
9443 set_optab_libfunc (ge_optab, TFmode, 0);
9444 set_optab_libfunc (lt_optab, TFmode, 0);
9445 set_optab_libfunc (le_optab, TFmode, 0);
9448 /* Rename the division and modulus functions in VMS. */
9450 static void
9451 ia64_vms_init_libfuncs (void)
9453 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9454 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9455 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9456 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9457 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9458 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9459 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9460 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9463 /* Rename the TFmode libfuncs available from soft-fp in glibc using
9464 the HPUX conventions. */
9466 static void
9467 ia64_sysv4_init_libfuncs (void)
9469 ia64_init_libfuncs ();
9471 /* These functions are not part of the HPUX TFmode interface. We
9472 use them instead of _U_Qfcmp, which doesn't work the way we
9473 expect. */
9474 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
9475 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
9476 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
9477 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
9478 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
9479 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
9481 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
9482 glibc doesn't have them. */
9485 /* For HPUX, it is illegal to have relocations in shared segments. */
9487 static int
9488 ia64_hpux_reloc_rw_mask (void)
9490 return 3;
9493 /* For others, relax this so that relocations to local data go in
9494 read-only segments, but we still cannot allow global relocations
9495 in read-only segments. */
9497 static int
9498 ia64_reloc_rw_mask (void)
9500 return flag_pic ? 3 : 2;
9503 /* Return the section to use for X. The only special thing we do here
9504 is to honor small data. */
9506 static section *
9507 ia64_select_rtx_section (enum machine_mode mode, rtx x,
9508 unsigned HOST_WIDE_INT align)
9510 if (GET_MODE_SIZE (mode) > 0
9511 && GET_MODE_SIZE (mode) <= ia64_section_threshold
9512 && !TARGET_NO_SDATA)
9513 return sdata_section;
9514 else
9515 return default_elf_select_rtx_section (mode, x, align);
9518 static unsigned int
9519 ia64_section_type_flags (tree decl, const char *name, int reloc)
9521 unsigned int flags = 0;
9523 if (strcmp (name, ".sdata") == 0
9524 || strncmp (name, ".sdata.", 7) == 0
9525 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9526 || strncmp (name, ".sdata2.", 8) == 0
9527 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
9528 || strcmp (name, ".sbss") == 0
9529 || strncmp (name, ".sbss.", 6) == 0
9530 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9531 flags = SECTION_SMALL;
9533 flags |= default_section_type_flags (decl, name, reloc);
9534 return flags;
9537 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
9538 structure type and the address for that return value should be passed
9539 in out0, rather than in r8. */
9541 static bool
9542 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
9543 {
9544 tree ret_type = TREE_TYPE (fntype);
9546 /* The Itanium C++ ABI requires that out0, rather than r8, be used
9547 as the structure return address parameter, if the return value
9548 type has a non-trivial copy constructor or destructor. It is not
9549 clear if this same convention should be used for other
9550 programming languages. Until G++ 3.4, we incorrectly used r8 for
9551 these return values. */
9552 return (abi_version_at_least (2)
9553 && ret_type
9554 && TYPE_MODE (ret_type) == BLKmode
9555 && TREE_ADDRESSABLE (ret_type)
9556 && strcmp (lang_hooks.name, "GNU C++") == 0);
9557 }
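/* Rough example of the distinction being drawn; the C++ types are made
   up, only the rule comes from the check above:

       struct pod { int a[16]; };           address passed in r8
       struct obj { ~obj (); int a[16]; };  address passed in out0

   A class with a non-trivial destructor is BLKmode and TREE_ADDRESSABLE,
   so for G++ the hook returns true and the implicit return-slot argument
   displaces "this" by one slot, as handled in ia64_output_mi_thunk
   below.  */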
9559 /* Output the assembler code for a thunk function. THUNK_DECL is the
9560 declaration for the thunk function itself, FUNCTION is the decl for
9561 the target function. DELTA is an immediate constant offset to be
9562 added to THIS. If VCALL_OFFSET is nonzero, the word at
9563 *(*this + vcall_offset) should be added to THIS. */
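/* Roughly, in C terms (a sketch, not code from this file), the thunk
   adjustment being generated is:

       this = this + delta;
       if (vcall_offset != 0)
         this = this + *(ptrdiff_t *) (*(char **) this + vcall_offset);

   i.e. DELTA is a fixed displacement and VCALL_OFFSET indexes the vtable
   that the (already displaced) object pointer refers to.  */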
9565 static void
9566 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9567 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9568 tree function)
9569 {
9570 rtx this, insn, funexp;
9571 unsigned int this_parmno;
9572 unsigned int this_regno;
9573 rtx delta_rtx;
9575 reload_completed = 1;
9576 epilogue_completed = 1;
9578 /* Set things up as ia64_expand_prologue might. */
9579 last_scratch_gr_reg = 15;
9581 memset (&current_frame_info, 0, sizeof (current_frame_info));
9582 current_frame_info.spill_cfa_off = -16;
9583 current_frame_info.n_input_regs = 1;
9584 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
9586 /* Mark the end of the (empty) prologue. */
9587 emit_note (NOTE_INSN_PROLOGUE_END);
9589 /* Figure out whether "this" will be the first parameter (the
9590 typical case) or the second parameter (as happens when the
9591 virtual function returns certain class objects). */
9592 this_parmno
9593 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
9594 ? 1 : 0);
9595 this_regno = IN_REG (this_parmno);
9596 if (!TARGET_REG_NAMES)
9597 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
9599 this = gen_rtx_REG (Pmode, this_regno);
9601 /* Apply the constant offset, if required. */
9602 delta_rtx = GEN_INT (delta);
9603 if (TARGET_ILP32)
9604 {
9605 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
9606 REG_POINTER (tmp) = 1;
9607 if (delta && satisfies_constraint_I (delta_rtx))
9608 {
9609 emit_insn (gen_ptr_extend_plus_imm (this, tmp, delta_rtx));
9610 delta = 0;
9611 }
9612 else
9613 emit_insn (gen_ptr_extend (this, tmp));
9614 }
9615 if (delta)
9616 {
9617 if (!satisfies_constraint_I (delta_rtx))
9618 {
9619 rtx tmp = gen_rtx_REG (Pmode, 2);
9620 emit_move_insn (tmp, delta_rtx);
9621 delta_rtx = tmp;
9622 }
9623 emit_insn (gen_adddi3 (this, this, delta_rtx));
9624 }
9626 /* Apply the offset from the vtable, if required. */
9627 if (vcall_offset)
9628 {
9629 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9630 rtx tmp = gen_rtx_REG (Pmode, 2);
9632 if (TARGET_ILP32)
9633 {
9634 rtx t = gen_rtx_REG (ptr_mode, 2);
9635 REG_POINTER (t) = 1;
9636 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
9637 if (satisfies_constraint_I (vcall_offset_rtx))
9638 {
9639 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
9640 vcall_offset = 0;
9641 }
9642 else
9643 emit_insn (gen_ptr_extend (tmp, t));
9644 }
9645 else
9646 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
9648 if (vcall_offset)
9649 {
9650 if (!satisfies_constraint_J (vcall_offset_rtx))
9651 {
9652 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9653 emit_move_insn (tmp2, vcall_offset_rtx);
9654 vcall_offset_rtx = tmp2;
9655 }
9656 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9657 }
9659 if (TARGET_ILP32)
9660 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
9661 else
9662 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9664 emit_insn (gen_adddi3 (this, this, tmp));
9665 }
9667 /* Generate a tail call to the target function. */
9668 if (! TREE_USED (function))
9669 {
9670 assemble_external (function);
9671 TREE_USED (function) = 1;
9672 }
9673 funexp = XEXP (DECL_RTL (function), 0);
9674 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9675 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
9676 insn = get_last_insn ();
9677 SIBLING_CALL_P (insn) = 1;
9679 /* Code generation for calls relies on splitting. */
9680 reload_completed = 1;
9681 epilogue_completed = 1;
9682 try_split (PATTERN (insn), insn, 0);
9684 emit_barrier ();
9686 /* Run just enough of rest_of_compilation to get the insns emitted.
9687 There's not really enough bulk here to make other passes such as
9688 instruction scheduling worthwhile. Note that use_thunk calls
9689 assemble_start_function and assemble_end_function. */
9691 insn_locators_alloc ();
9692 emit_all_insn_group_barriers (NULL);
9693 insn = get_insns ();
9694 shorten_branches (insn);
9695 final_start_function (insn, file, 1);
9696 final (insn, file, 1);
9697 final_end_function ();
9698 free_after_compilation (cfun);
9700 reload_completed = 0;
9701 epilogue_completed = 0;
9702 }
9704 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9706 static rtx
9707 ia64_struct_value_rtx (tree fntype,
9708 int incoming ATTRIBUTE_UNUSED)
9709 {
9710 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
9711 return NULL_RTX;
9712 return gen_rtx_REG (Pmode, GR_REG (8));
9713 }
9715 static bool
9716 ia64_scalar_mode_supported_p (enum machine_mode mode)
9717 {
9718 switch (mode)
9719 {
9720 case QImode:
9721 case HImode:
9722 case SImode:
9723 case DImode:
9724 case TImode:
9725 return true;
9727 case SFmode:
9728 case DFmode:
9729 case XFmode:
9730 case RFmode:
9731 return true;
9733 case TFmode:
9734 return TARGET_HPUX;
9736 default:
9737 return false;
9738 }
9739 }
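/* Summary of the choices above: all integer modes up to TImode and the
   float formats SFmode, DFmode, XFmode (80-bit extended) and RFmode (the
   82-bit __fpreg register format) are always advertised, while TFmode
   (IEEE quad, __float128) is only advertised on HP-UX, where libc
   provides the _U_Q* support routines registered earlier in this file.  */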
9741 static bool
9742 ia64_vector_mode_supported_p (enum machine_mode mode)
9743 {
9744 switch (mode)
9745 {
9746 case V8QImode:
9747 case V4HImode:
9748 case V2SImode:
9749 return true;
9751 case V2SFmode:
9752 return true;
9754 default:
9755 return false;
9756 }
9757 }
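/* Descriptive note only: these are the element layouts the IA-64
   multimedia instructions handle in a 64-bit general register (8x8-bit,
   4x16-bit and 2x32-bit integers), plus pairs of single-precision floats
   handled in the FP registers; every other vector mode is rejected and
   left to generic code.  */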
9759 /* Implement the FUNCTION_PROFILER macro. */
9761 void
9762 ia64_output_function_profiler (FILE *file, int labelno)
9763 {
9764 bool indirect_call;
9766 /* If the function needs a static chain and the static chain
9767 register is r15, we use an indirect call so as to bypass
9768 the PLT stub in case the executable is dynamically linked,
9769 because the stub clobbers r15 as per 5.3.6 of the psABI.
9770 We don't need to do that in non canonical PIC mode. */
9772 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
9773 {
9774 gcc_assert (STATIC_CHAIN_REGNUM == 15);
9775 indirect_call = true;
9776 }
9777 else
9778 indirect_call = false;
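/* What the fputs calls below emit is, schematically, the ia64 _mcount
   calling sequence used here: an alloc that captures ar.pfs in out0, the
   caller's gp (r1) in out1, the return branch register b0 in out2, and
   either r0 or the address of the per-call-site counter word (the LP
   label) in out3.  In the indirect case the _mcount function descriptor
   is loaded from the linkage table and called through b6, bypassing the
   PLT stub that would clobber r15.  This is a summary of the code below,
   not an additional contract.  */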
9780 if (TARGET_GNU_AS)
9781 fputs ("\t.prologue 4, r40\n", file);
9782 else
9783 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
9784 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
9786 if (NO_PROFILE_COUNTERS)
9787 fputs ("\tmov out3 = r0\n", file);
9788 else
9789 {
9790 char buf[20];
9791 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9793 if (TARGET_AUTO_PIC)
9794 fputs ("\tmovl out3 = @gprel(", file);
9795 else
9796 fputs ("\taddl out3 = @ltoff(", file);
9797 assemble_name (file, buf);
9798 if (TARGET_AUTO_PIC)
9799 fputs (")\n", file);
9800 else
9801 fputs ("), r1\n", file);
9804 if (indirect_call)
9805 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
9806 fputs ("\t;;\n", file);
9808 fputs ("\t.save rp, r42\n", file);
9809 fputs ("\tmov out2 = b0\n", file);
9810 if (indirect_call)
9811 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
9812 fputs ("\t.body\n", file);
9813 fputs ("\tmov out1 = r1\n", file);
9814 if (indirect_call)
9815 {
9816 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
9817 fputs ("\tmov b6 = r16\n", file);
9818 fputs ("\tld8 r1 = [r14]\n", file);
9819 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
9820 }
9821 else
9822 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
9823 }
9825 static GTY(()) rtx mcount_func_rtx;
9826 static rtx
9827 gen_mcount_func_rtx (void)
9828 {
9829 if (!mcount_func_rtx)
9830 mcount_func_rtx = init_one_libfunc ("_mcount");
9831 return mcount_func_rtx;
9832 }
9834 void
9835 ia64_profile_hook (int labelno)
9836 {
9837 rtx label, ip;
9839 if (NO_PROFILE_COUNTERS)
9840 label = const0_rtx;
9841 else
9842 {
9843 char buf[30];
9844 const char *label_name;
9845 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9846 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
9847 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
9848 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
9849 }
9850 ip = gen_reg_rtx (Pmode);
9851 emit_insn (gen_ip_value (ip));
9852 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
9853 VOIDmode, 3,
9854 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
9855 ip, Pmode,
9856 label, Pmode);
9857 }
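/* Where this hook is used instead of the asm sequence above, _mcount is
   reached through an ordinary libcall whose three arguments mirror the
   same information: the return address (b0), the current IP obtained via
   gen_ip_value, and the counter label (const0_rtx when counters are
   disabled).  */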
9859 /* Return the mangling of TYPE if it is an extended fundamental type. */
9861 static const char *
9862 ia64_mangle_type (const_tree type)
9863 {
9864 type = TYPE_MAIN_VARIANT (type);
9866 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
9867 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
9868 return NULL;
9870 /* On HP-UX, "long double" is mangled as "e" so __float128 is
9871 mangled as "e". */
9872 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9873 return "g";
9874 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9875 an extended mangling. Elsewhere, "e" is available since long
9876 double is 80 bits. */
9877 if (TYPE_MODE (type) == XFmode)
9878 return TARGET_HPUX ? "u9__float80" : "e";
9879 if (TYPE_MODE (type) == RFmode)
9880 return "u7__fpreg";
9881 return NULL;
9882 }
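/* Hypothetical examples of the resulting manglings; the function f is
   made up, only the type codes come from the hook above:

       void f (__float80);   _Z1fe on GNU/Linux, _Z1fu9__float80 on HP-UX
       void f (__float128);  _Z1fg on GNU/Linux, _Z1fe on HP-UX
       void f (__fpreg);     _Z1fu7__fpreg everywhere

   "u<len><name>" is the vendor-extended-type encoding of the Itanium
   C++ ABI; "e" and "g" are its long double and __float128 codes.  */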
9884 /* Return the diagnostic message string if conversion from FROMTYPE to
9885 TOTYPE is not allowed, NULL otherwise. */
9886 static const char *
9887 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
9888 {
9889 /* Reject nontrivial conversion to or from __fpreg. */
9890 if (TYPE_MODE (fromtype) == RFmode
9891 && TYPE_MODE (totype) != RFmode
9892 && TYPE_MODE (totype) != VOIDmode)
9893 return N_("invalid conversion from %<__fpreg%>");
9894 if (TYPE_MODE (totype) == RFmode
9895 && TYPE_MODE (fromtype) != RFmode)
9896 return N_("invalid conversion to %<__fpreg%>");
9897 return NULL;
9898 }
9900 /* Return the diagnostic message string if the unary operation OP is
9901 not permitted on TYPE, NULL otherwise. */
9902 static const char *
9903 ia64_invalid_unary_op (int op, const_tree type)
9904 {
9905 /* Reject operations on __fpreg other than unary + or &. */
9906 if (TYPE_MODE (type) == RFmode
9907 && op != CONVERT_EXPR
9908 && op != ADDR_EXPR)
9909 return N_("invalid operation on %<__fpreg%>");
9910 return NULL;
9911 }
9913 /* Return the diagnostic message string if the binary operation OP is
9914 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9915 static const char *
9916 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
9917 {
9918 /* Reject operations on __fpreg. */
9919 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9920 return N_("invalid operation on %<__fpreg%>");
9921 return NULL;
9922 }
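/* Net effect of the three diagnostics above, shown on made-up user code:

       __fpreg r;
       __fpreg *p = &r;          accepted, unary & is allowed
       __fpreg s = +r;           accepted, unary + (CONVERT_EXPR) is allowed
       double d = (double) r;    rejected, conversion from __fpreg
       r = r + 1.0;              rejected, binary operation on __fpreg

   The intent is that __fpreg values move through the 82-bit FP registers
   unmodified, so only trivial operations are permitted on them.  */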
9924 /* Implement overriding of the optimization options. */
9925 void
9926 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
9927 int size ATTRIBUTE_UNUSED)
9928 {
9929 /* Let the scheduler form additional regions. */
9930 set_param_value ("max-sched-extend-regions-iters", 2);
9932 /* Set the default values for cache-related parameters. */
9933 set_param_value ("simultaneous-prefetches", 6);
9934 set_param_value ("l1-cache-line-size", 32);
9935 }
9938 /* HP-UX version_id attribute.
9939 For object foo, if the version_id is set to 1234, put out an alias
9940 of '.alias foo "foo{1234}"'. We can't use "foo{1234}" in anything
9941 other than an alias statement because it is an illegal symbol name. */
9943 static tree
9944 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
9945 tree name ATTRIBUTE_UNUSED,
9946 tree args,
9947 int flags ATTRIBUTE_UNUSED,
9948 bool *no_add_attrs)
9949 {
9950 tree arg = TREE_VALUE (args);
9952 if (TREE_CODE (arg) != STRING_CST)
9953 {
9954 error ("version attribute is not a string");
9955 *no_add_attrs = true;
9956 return NULL_TREE;
9957 }
9958 return NULL_TREE;
9959 }
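/* Presumed usage, following the comment above; the declaration is an
   example, not something from this file:

       extern int foo (void) __attribute__((version_id ("1234")));

   The handler only validates the string argument; the '.alias' directive
   described above is emitted elsewhere, not by this function.  */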
9961 /* Target hook for c_mode_for_suffix. */
9963 static enum machine_mode
9964 ia64_c_mode_for_suffix (char suffix)
9965 {
9966 if (suffix == 'q')
9967 return TFmode;
9968 if (suffix == 'w')
9969 return XFmode;
9971 return VOIDmode;
9972 }
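/* This hook is what lets C-family constants spell the extended types
   directly, for example (illustrative only):

       __float128 q = 1.0q;   suffix 'q' selects TFmode
       __float80  w = 1.0w;   suffix 'w' selects XFmode

   Any other suffix returns VOIDmode and is handled by the language's
   normal rules.  */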
9974 #include "gt-ia64.h"