gcc/config/ia64/ia64.c  [official-gcc.git, blob 5e1328b3bfbe57f9ffdefd1e8a9448c9b77182ca]
2007-05-30  H.J. Lu  <hongjiu.lu@intel.com>
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "except.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "toplev.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "intl.h"
56 #include "debug.h"
57 #include "params.h"
58 #include "tm-constrs.h"
60 /* This is used for communication between ASM_OUTPUT_LABEL and
61 ASM_OUTPUT_LABELREF. */
62 int ia64_asm_output_label = 0;
64 /* Define the information needed to generate branch and scc insns. This is
65 stored from the compare operation. */
66 struct rtx_def * ia64_compare_op0;
67 struct rtx_def * ia64_compare_op1;
69 /* Register names for ia64_expand_prologue. */
70 static const char * const ia64_reg_numbers[96] =
71 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
72 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
73 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
74 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
75 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
76 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
77 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
78 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
79 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
80 "r104","r105","r106","r107","r108","r109","r110","r111",
81 "r112","r113","r114","r115","r116","r117","r118","r119",
82 "r120","r121","r122","r123","r124","r125","r126","r127"};
84 /* ??? These strings could be shared with REGISTER_NAMES. */
85 static const char * const ia64_input_reg_names[8] =
86 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
88 /* ??? These strings could be shared with REGISTER_NAMES. */
89 static const char * const ia64_local_reg_names[80] =
90 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
91 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
92 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
93 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
94 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
95 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
96 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
97 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
98 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
99 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
101 /* ??? These strings could be shared with REGISTER_NAMES. */
102 static const char * const ia64_output_reg_names[8] =
103 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
105 /* Which CPU we are scheduling for.  */
106 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
108 /* Determines whether we run our final scheduling pass or not. We always
109 avoid the normal second scheduling pass. */
110 static int ia64_flag_schedule_insns2;
112 /* Determines whether we run variable tracking in machine dependent
113 reorganization. */
114 static int ia64_flag_var_tracking;
116 /* Variables which are this size or smaller are put in the sdata/sbss
117 sections. */
119 unsigned int ia64_section_threshold;
121 /* The following variable is used by the DFA insn scheduler. The value is
122 TRUE if we do insn bundling instead of insn scheduling. */
123 int bundling_p = 0;
125 /* Structure to be filled in by ia64_compute_frame_size with register
126 save masks and offsets for the current function. */
128 struct ia64_frame_info
130 HOST_WIDE_INT total_size; /* size of the stack frame, not including
131 the caller's scratch area. */
132 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
133 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
134 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
135 HARD_REG_SET mask; /* mask of saved registers. */
136 unsigned int gr_used_mask; /* mask of registers in use as gr spill
137 registers or long-term scratches. */
138 int n_spilled; /* number of spilled registers. */
139 int reg_fp; /* register for fp. */
140 int reg_save_b0; /* save register for b0. */
141 int reg_save_pr; /* save register for prs. */
142 int reg_save_ar_pfs; /* save register for ar.pfs. */
143 int reg_save_ar_unat; /* save register for ar.unat. */
144 int reg_save_ar_lc; /* save register for ar.lc. */
145 int reg_save_gp; /* save register for gp. */
146 int n_input_regs; /* number of input registers used. */
147 int n_local_regs; /* number of local registers used. */
148 int n_output_regs; /* number of output registers used. */
149 int n_rotate_regs; /* number of rotating registers used. */
151 char need_regstk; /* true if a .regstk directive needed. */
152 char initialized; /* true if the data is finalized. */
155 /* Current frame information calculated by ia64_compute_frame_size. */
156 static struct ia64_frame_info current_frame_info;
158 static int ia64_first_cycle_multipass_dfa_lookahead (void);
159 static void ia64_dependencies_evaluation_hook (rtx, rtx);
160 static void ia64_init_dfa_pre_cycle_insn (void);
161 static rtx ia64_dfa_pre_cycle_insn (void);
162 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
163 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (rtx);
164 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
165 static void ia64_h_i_d_extended (void);
166 static int ia64_mode_to_int (enum machine_mode);
167 static void ia64_set_sched_flags (spec_info_t);
168 static int ia64_speculate_insn (rtx, ds_t, rtx *);
169 static rtx ia64_gen_spec_insn (rtx, ds_t, int, bool, bool);
170 static bool ia64_needs_block_p (rtx);
171 static rtx ia64_gen_check (rtx, rtx, bool);
172 static int ia64_spec_check_p (rtx);
173 static int ia64_spec_check_src_p (rtx);
174 static rtx gen_tls_get_addr (void);
175 static rtx gen_thread_pointer (void);
176 static int find_gr_spill (int);
177 static int next_scratch_gr_reg (void);
178 static void mark_reg_gr_used_mask (rtx, void *);
179 static void ia64_compute_frame_size (HOST_WIDE_INT);
180 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
181 static void finish_spill_pointers (void);
182 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
183 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
184 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
185 static rtx gen_movdi_x (rtx, rtx, rtx);
186 static rtx gen_fr_spill_x (rtx, rtx, rtx);
187 static rtx gen_fr_restore_x (rtx, rtx, rtx);
189 static enum machine_mode hfa_element_mode (tree, bool);
190 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
191 tree, int *, int);
192 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
193 tree, bool);
194 static bool ia64_function_ok_for_sibcall (tree, tree);
195 static bool ia64_return_in_memory (tree, tree);
196 static bool ia64_rtx_costs (rtx, int, int, int *);
197 static void fix_range (const char *);
198 static bool ia64_handle_option (size_t, const char *, int);
199 static struct machine_function * ia64_init_machine_status (void);
200 static void emit_insn_group_barriers (FILE *);
201 static void emit_all_insn_group_barriers (FILE *);
202 static void final_emit_insn_group_barriers (FILE *);
203 static void emit_predicate_relation_info (void);
204 static void ia64_reorg (void);
205 static bool ia64_in_small_data_p (tree);
206 static void process_epilogue (FILE *, rtx, bool, bool);
207 static int process_set (FILE *, rtx, rtx, bool, bool);
209 static bool ia64_assemble_integer (rtx, unsigned int, int);
210 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
211 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
212 static void ia64_output_function_end_prologue (FILE *);
214 static int ia64_issue_rate (void);
215 static int ia64_adjust_cost (rtx, rtx, rtx, int);
216 static void ia64_sched_init (FILE *, int, int);
217 static void ia64_sched_init_global (FILE *, int, int);
218 static void ia64_sched_finish_global (FILE *, int);
219 static void ia64_sched_finish (FILE *, int);
220 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
221 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
222 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
223 static int ia64_variable_issue (FILE *, int, rtx, int);
225 static struct bundle_state *get_free_bundle_state (void);
226 static void free_bundle_state (struct bundle_state *);
227 static void initiate_bundle_states (void);
228 static void finish_bundle_states (void);
229 static unsigned bundle_state_hash (const void *);
230 static int bundle_state_eq_p (const void *, const void *);
231 static int insert_bundle_state (struct bundle_state *);
232 static void initiate_bundle_state_table (void);
233 static void finish_bundle_state_table (void);
234 static int try_issue_nops (struct bundle_state *, int);
235 static int try_issue_insn (struct bundle_state *, rtx);
236 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
237 static int get_max_pos (state_t);
238 static int get_template (state_t, int);
240 static rtx get_next_important_insn (rtx, rtx);
241 static void bundling (FILE *, int, rtx, rtx);
243 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
244 HOST_WIDE_INT, tree);
245 static void ia64_file_start (void);
246 static void ia64_globalize_decl_name (FILE *, tree);
248 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
249 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
250 static section *ia64_select_rtx_section (enum machine_mode, rtx,
251 unsigned HOST_WIDE_INT);
252 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
253 ATTRIBUTE_UNUSED;
254 static unsigned int ia64_section_type_flags (tree, const char *, int);
255 static void ia64_init_libfuncs (void)
256 ATTRIBUTE_UNUSED;
257 static void ia64_hpux_init_libfuncs (void)
258 ATTRIBUTE_UNUSED;
259 static void ia64_sysv4_init_libfuncs (void)
260 ATTRIBUTE_UNUSED;
261 static void ia64_vms_init_libfuncs (void)
262 ATTRIBUTE_UNUSED;
264 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
265 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
266 static void ia64_encode_section_info (tree, rtx, int);
267 static rtx ia64_struct_value_rtx (tree, int);
268 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
269 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
270 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
271 static bool ia64_cannot_force_const_mem (rtx);
272 static const char *ia64_mangle_fundamental_type (tree);
273 static const char *ia64_invalid_conversion (tree, tree);
274 static const char *ia64_invalid_unary_op (int, tree);
275 static const char *ia64_invalid_binary_op (int, tree, tree);
277 /* Table of valid machine attributes. */
278 static const struct attribute_spec ia64_attribute_table[] =
280 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
281 { "syscall_linkage", 0, 0, false, true, true, NULL },
282 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
283 { "version_id", 1, 1, true, false, false,
284 ia64_handle_version_id_attribute },
285 { NULL, 0, 0, false, false, false, NULL }
288 /* Initialize the GCC target structure. */
289 #undef TARGET_ATTRIBUTE_TABLE
290 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
292 #undef TARGET_INIT_BUILTINS
293 #define TARGET_INIT_BUILTINS ia64_init_builtins
295 #undef TARGET_EXPAND_BUILTIN
296 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
298 #undef TARGET_ASM_BYTE_OP
299 #define TARGET_ASM_BYTE_OP "\tdata1\t"
300 #undef TARGET_ASM_ALIGNED_HI_OP
301 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
302 #undef TARGET_ASM_ALIGNED_SI_OP
303 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
304 #undef TARGET_ASM_ALIGNED_DI_OP
305 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
306 #undef TARGET_ASM_UNALIGNED_HI_OP
307 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
308 #undef TARGET_ASM_UNALIGNED_SI_OP
309 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
310 #undef TARGET_ASM_UNALIGNED_DI_OP
311 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
312 #undef TARGET_ASM_INTEGER
313 #define TARGET_ASM_INTEGER ia64_assemble_integer
315 #undef TARGET_ASM_FUNCTION_PROLOGUE
316 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
317 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
318 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
319 #undef TARGET_ASM_FUNCTION_EPILOGUE
320 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
322 #undef TARGET_IN_SMALL_DATA_P
323 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
325 #undef TARGET_SCHED_ADJUST_COST
326 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
327 #undef TARGET_SCHED_ISSUE_RATE
328 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
329 #undef TARGET_SCHED_VARIABLE_ISSUE
330 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
331 #undef TARGET_SCHED_INIT
332 #define TARGET_SCHED_INIT ia64_sched_init
333 #undef TARGET_SCHED_FINISH
334 #define TARGET_SCHED_FINISH ia64_sched_finish
335 #undef TARGET_SCHED_INIT_GLOBAL
336 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
337 #undef TARGET_SCHED_FINISH_GLOBAL
338 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
339 #undef TARGET_SCHED_REORDER
340 #define TARGET_SCHED_REORDER ia64_sched_reorder
341 #undef TARGET_SCHED_REORDER2
342 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
344 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
345 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
347 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
348 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
350 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
351 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
352 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
353 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
355 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
356 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
357 ia64_first_cycle_multipass_dfa_lookahead_guard
359 #undef TARGET_SCHED_DFA_NEW_CYCLE
360 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
362 #undef TARGET_SCHED_H_I_D_EXTENDED
363 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
365 #undef TARGET_SCHED_SET_SCHED_FLAGS
366 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
368 #undef TARGET_SCHED_SPECULATE_INSN
369 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
371 #undef TARGET_SCHED_NEEDS_BLOCK_P
372 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
374 #undef TARGET_SCHED_GEN_CHECK
375 #define TARGET_SCHED_GEN_CHECK ia64_gen_check
377 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
378 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
379 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
381 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
382 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
383 #undef TARGET_ARG_PARTIAL_BYTES
384 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
386 #undef TARGET_ASM_OUTPUT_MI_THUNK
387 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
388 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
389 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
391 #undef TARGET_ASM_FILE_START
392 #define TARGET_ASM_FILE_START ia64_file_start
394 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
395 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
397 #undef TARGET_RTX_COSTS
398 #define TARGET_RTX_COSTS ia64_rtx_costs
399 #undef TARGET_ADDRESS_COST
400 #define TARGET_ADDRESS_COST hook_int_rtx_0
402 #undef TARGET_MACHINE_DEPENDENT_REORG
403 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
405 #undef TARGET_ENCODE_SECTION_INFO
406 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
408 #undef TARGET_SECTION_TYPE_FLAGS
409 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
411 #ifdef HAVE_AS_TLS
412 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
413 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
414 #endif
416 /* ??? ABI doesn't allow us to define this. */
417 #if 0
418 #undef TARGET_PROMOTE_FUNCTION_ARGS
419 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
420 #endif
422 /* ??? ABI doesn't allow us to define this. */
423 #if 0
424 #undef TARGET_PROMOTE_FUNCTION_RETURN
425 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
426 #endif
428 /* ??? Investigate. */
429 #if 0
430 #undef TARGET_PROMOTE_PROTOTYPES
431 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
432 #endif
434 #undef TARGET_STRUCT_VALUE_RTX
435 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
436 #undef TARGET_RETURN_IN_MEMORY
437 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
438 #undef TARGET_SETUP_INCOMING_VARARGS
439 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
440 #undef TARGET_STRICT_ARGUMENT_NAMING
441 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
442 #undef TARGET_MUST_PASS_IN_STACK
443 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
445 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
446 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
448 #undef TARGET_UNWIND_EMIT
449 #define TARGET_UNWIND_EMIT process_for_unwind_directive
451 #undef TARGET_SCALAR_MODE_SUPPORTED_P
452 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
453 #undef TARGET_VECTOR_MODE_SUPPORTED_P
454 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
456 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
457 in an order different from the specified program order. */
458 #undef TARGET_RELAXED_ORDERING
459 #define TARGET_RELAXED_ORDERING true
461 #undef TARGET_DEFAULT_TARGET_FLAGS
462 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
463 #undef TARGET_HANDLE_OPTION
464 #define TARGET_HANDLE_OPTION ia64_handle_option
466 #undef TARGET_CANNOT_FORCE_CONST_MEM
467 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
469 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
470 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ia64_mangle_fundamental_type
472 #undef TARGET_INVALID_CONVERSION
473 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
474 #undef TARGET_INVALID_UNARY_OP
475 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
476 #undef TARGET_INVALID_BINARY_OP
477 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
479 struct gcc_target targetm = TARGET_INITIALIZER;
481 typedef enum
483 ADDR_AREA_NORMAL, /* normal address area */
484 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
486 ia64_addr_area;
488 static GTY(()) tree small_ident1;
489 static GTY(()) tree small_ident2;
491 static void
492 init_idents (void)
494 if (small_ident1 == 0)
496 small_ident1 = get_identifier ("small");
497 small_ident2 = get_identifier ("__small__");
501 /* Retrieve the address area that has been chosen for the given decl. */
503 static ia64_addr_area
504 ia64_get_addr_area (tree decl)
506 tree model_attr;
508 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
509 if (model_attr)
511 tree id;
513 init_idents ();
514 id = TREE_VALUE (TREE_VALUE (model_attr));
515 if (id == small_ident1 || id == small_ident2)
516 return ADDR_AREA_SMALL;
518 return ADDR_AREA_NORMAL;
521 static tree
522 ia64_handle_model_attribute (tree *node, tree name, tree args,
523 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
525 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
526 ia64_addr_area area;
527 tree arg, decl = *node;
529 init_idents ();
530 arg = TREE_VALUE (args);
531 if (arg == small_ident1 || arg == small_ident2)
533 addr_area = ADDR_AREA_SMALL;
535 else
537 warning (OPT_Wattributes, "invalid argument of %qs attribute",
538 IDENTIFIER_POINTER (name));
539 *no_add_attrs = true;
542 switch (TREE_CODE (decl))
544 case VAR_DECL:
545 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
546 == FUNCTION_DECL)
547 && !TREE_STATIC (decl))
549 error ("%Jan address area attribute cannot be specified for "
550 "local variables", decl);
551 *no_add_attrs = true;
553 area = ia64_get_addr_area (decl);
554 if (area != ADDR_AREA_NORMAL && addr_area != area)
556 error ("address area of %q+D conflicts with previous "
557 "declaration", decl);
558 *no_add_attrs = true;
560 break;
562 case FUNCTION_DECL:
563 error ("%Jaddress area attribute cannot be specified for functions",
564 decl);
565 *no_add_attrs = true;
566 break;
568 default:
569 warning (OPT_Wattributes, "%qs attribute ignored",
570 IDENTIFIER_POINTER (name));
571 *no_add_attrs = true;
572 break;
575 return NULL_TREE;
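/* For reference, the attribute handled above is spelled in user code as,
   for example:

       static int counter __attribute__ ((model ("small")));

   which places COUNTER in the small address area so that its address can
   be formed with a single "addl" (see ia64_addr_area above).  */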
578 static void
579 ia64_encode_addr_area (tree decl, rtx symbol)
581 int flags;
583 flags = SYMBOL_REF_FLAGS (symbol);
584 switch (ia64_get_addr_area (decl))
586 case ADDR_AREA_NORMAL: break;
587 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
588 default: gcc_unreachable ();
590 SYMBOL_REF_FLAGS (symbol) = flags;
593 static void
594 ia64_encode_section_info (tree decl, rtx rtl, int first)
596 default_encode_section_info (decl, rtl, first);
598 /* Careful not to prod global register variables. */
599 if (TREE_CODE (decl) == VAR_DECL
600 && GET_CODE (DECL_RTL (decl)) == MEM
601 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
602 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
603 ia64_encode_addr_area (decl, XEXP (rtl, 0));
606 /* Return 1 if the operands of a move are ok. */
609 ia64_move_ok (rtx dst, rtx src)
611 /* If we're under init_recog_no_volatile, we'll not be able to use
612 memory_operand. So check the code directly and don't worry about
613 the validity of the underlying address, which should have been
614 checked elsewhere anyway. */
615 if (GET_CODE (dst) != MEM)
616 return 1;
617 if (GET_CODE (src) == MEM)
618 return 0;
619 if (register_operand (src, VOIDmode))
620 return 1;
622 /* Otherwise, this must be a constant, and it must be either 0 (integral modes) or 0.0 or 1.0 (FP modes).  */
623 if (INTEGRAL_MODE_P (GET_MODE (dst)))
624 return src == const0_rtx;
625 else
626 return satisfies_constraint_G (src);
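/* In other words: any non-memory destination is OK here; a memory
   destination only accepts a register, integer zero, or an FP 0.0/1.0
   source, so MEM-to-MEM moves and general constant-to-MEM moves must be
   split up by the caller.  */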
629 /* Return 1 if the operands are ok for a floating point load pair. */
632 ia64_load_pair_ok (rtx dst, rtx src)
634 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
635 return 0;
636 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
637 return 0;
638 switch (GET_CODE (XEXP (src, 0)))
640 case REG:
641 case POST_INC:
642 break;
643 case POST_DEC:
644 return 0;
645 case POST_MODIFY:
647 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
649 if (GET_CODE (adjust) != CONST_INT
650 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
651 return 0;
653 break;
654 default:
655 abort ();
657 return 1;
661 addp4_optimize_ok (rtx op1, rtx op2)
663 return (basereg_operand (op1, GET_MODE(op1)) !=
664 basereg_operand (op2, GET_MODE(op2)));
667 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
668 Return the length of the field, or <= 0 on failure. */
670 int
671 ia64_depz_field_mask (rtx rop, rtx rshift)
672 {
673   unsigned HOST_WIDE_INT op = INTVAL (rop);
674   unsigned HOST_WIDE_INT shift = INTVAL (rshift);
676   /* Get rid of the zero bits we're shifting in.  */
677   op >>= shift;
679   /* We must now have a solid block of 1's at bit 0.  */
680   return exact_log2 (op + 1);
681 }
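/* For example, with a mask of 0x7f8 and a shift of 3, the shift discards
   the three low zero bits, leaving 0xff; 0xff + 1 is a power of two, so
   exact_log2 returns 8, the width of the deposited field.  If the shifted
   mask is not a solid block of ones, op + 1 is not a power of two and
   exact_log2 returns -1, signalling failure.  */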
683 /* Return the TLS model to use for ADDR. */
685 static enum tls_model
686 tls_symbolic_operand_type (rtx addr)
688 enum tls_model tls_kind = 0;
690 if (GET_CODE (addr) == CONST)
692 if (GET_CODE (XEXP (addr, 0)) == PLUS
693 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
694 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
696 else if (GET_CODE (addr) == SYMBOL_REF)
697 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
699 return tls_kind;
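/* ADDR is expected to be either a bare SYMBOL_REF or an offset form such
   as (const (plus (symbol_ref "x") (const_int 16))); anything else falls
   through with TLS_KIND still 0, i.e. not thread-local.  */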
702 /* Return true if X is a constant that is valid for some immediate
703 field in an instruction. */
705 bool
706 ia64_legitimate_constant_p (rtx x)
708 switch (GET_CODE (x))
710 case CONST_INT:
711 case LABEL_REF:
712 return true;
714 case CONST_DOUBLE:
715 if (GET_MODE (x) == VOIDmode)
716 return true;
717 return satisfies_constraint_G (x);
719 case CONST:
720 case SYMBOL_REF:
721 /* ??? Short term workaround for PR 28490. We must make the code here
722 match the code in ia64_expand_move and move_operand, even though they
723 are both technically wrong. */
724 if (tls_symbolic_operand_type (x) == 0)
726 HOST_WIDE_INT addend = 0;
727 rtx op = x;
729 if (GET_CODE (op) == CONST
730 && GET_CODE (XEXP (op, 0)) == PLUS
731 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
733 addend = INTVAL (XEXP (XEXP (op, 0), 1));
734 op = XEXP (XEXP (op, 0), 0);
737 if (any_offset_symbol_operand (op, GET_MODE (op))
738 || function_operand (op, GET_MODE (op)))
739 return true;
740 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
741 return (addend & 0x3fff) == 0;
742 return false;
744 return false;
746 case CONST_VECTOR:
748 enum machine_mode mode = GET_MODE (x);
750 if (mode == V2SFmode)
751 return satisfies_constraint_Y (x);
753 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
754 && GET_MODE_SIZE (mode) <= 8);
757 default:
758 return false;
762 /* Don't allow TLS addresses to get spilled to memory. */
764 static bool
765 ia64_cannot_force_const_mem (rtx x)
767 return tls_symbolic_operand_type (x) != 0;
770 /* Expand a symbolic constant load. */
772 bool
773 ia64_expand_load_address (rtx dest, rtx src)
775 gcc_assert (GET_CODE (dest) == REG);
777 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
778 having to pointer-extend the value afterward. Other forms of address
779 computation below are also more natural to compute as 64-bit quantities.
780 If we've been given an SImode destination register, change it. */
781 if (GET_MODE (dest) != Pmode)
782 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest), 0);
784 if (TARGET_NO_PIC)
785 return false;
786 if (small_addr_symbolic_operand (src, VOIDmode))
787 return false;
789 if (TARGET_AUTO_PIC)
790 emit_insn (gen_load_gprel64 (dest, src));
791 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
792 emit_insn (gen_load_fptr (dest, src));
793 else if (sdata_symbolic_operand (src, VOIDmode))
794 emit_insn (gen_load_gprel (dest, src));
795 else
797 HOST_WIDE_INT addend = 0;
798 rtx tmp;
800 /* We did split constant offsets in ia64_expand_move, and we did try
801 to keep them split in move_operand, but we also allowed reload to
802 rematerialize arbitrary constants rather than spill the value to
803 the stack and reload it. So we have to be prepared here to split
804 them apart again. */
805 if (GET_CODE (src) == CONST)
807 HOST_WIDE_INT hi, lo;
809 hi = INTVAL (XEXP (XEXP (src, 0), 1));
810 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
811 hi = hi - lo;
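	  /* LO is the low 14 bits of the offset, sign-extended, and HI is
	     the remainder; e.g. an offset of 0x3000 splits into
	     hi = 0x4000, lo = -0x1000.  Splitting this way keeps LO within
	     the 14-bit immediate range of an add.  */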
813 if (lo != 0)
815 addend = lo;
816 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
820 tmp = gen_rtx_HIGH (Pmode, src);
821 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
822 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
824 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
825 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
827 if (addend)
829 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
830 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
834 return true;
837 static GTY(()) rtx gen_tls_tga;
838 static rtx
839 gen_tls_get_addr (void)
841 if (!gen_tls_tga)
842 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
843 return gen_tls_tga;
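/* Under the IA-64 software conventions the thread pointer lives in r13,
   which is why gen_thread_pointer below hard-codes register 13.  */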
846 static GTY(()) rtx thread_pointer_rtx;
847 static rtx
848 gen_thread_pointer (void)
850 if (!thread_pointer_rtx)
851 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
852 return thread_pointer_rtx;
855 static rtx
856 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
857 rtx orig_op1, HOST_WIDE_INT addend)
859 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
860 rtx orig_op0 = op0;
861 HOST_WIDE_INT addend_lo, addend_hi;
863 switch (tls_kind)
865 case TLS_MODEL_GLOBAL_DYNAMIC:
866 start_sequence ();
868 tga_op1 = gen_reg_rtx (Pmode);
869 emit_insn (gen_load_dtpmod (tga_op1, op1));
871 tga_op2 = gen_reg_rtx (Pmode);
872 emit_insn (gen_load_dtprel (tga_op2, op1));
874 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
875 LCT_CONST, Pmode, 2, tga_op1,
876 Pmode, tga_op2, Pmode);
878 insns = get_insns ();
879 end_sequence ();
881 if (GET_MODE (op0) != Pmode)
882 op0 = tga_ret;
883 emit_libcall_block (insns, op0, tga_ret, op1);
884 break;
886 case TLS_MODEL_LOCAL_DYNAMIC:
887 /* ??? This isn't the completely proper way to do local-dynamic.
888 If the call to __tls_get_addr is used only by a single symbol,
889 then we should (somehow) move the dtprel to the second arg
890 to avoid the extra add. */
891 start_sequence ();
893 tga_op1 = gen_reg_rtx (Pmode);
894 emit_insn (gen_load_dtpmod (tga_op1, op1));
896 tga_op2 = const0_rtx;
898 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
899 LCT_CONST, Pmode, 2, tga_op1,
900 Pmode, tga_op2, Pmode);
902 insns = get_insns ();
903 end_sequence ();
905 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
906 UNSPEC_LD_BASE);
907 tmp = gen_reg_rtx (Pmode);
908 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
910 if (!register_operand (op0, Pmode))
911 op0 = gen_reg_rtx (Pmode);
912 if (TARGET_TLS64)
914 emit_insn (gen_load_dtprel (op0, op1));
915 emit_insn (gen_adddi3 (op0, tmp, op0));
917 else
918 emit_insn (gen_add_dtprel (op0, op1, tmp));
919 break;
921 case TLS_MODEL_INITIAL_EXEC:
922 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
923 addend_hi = addend - addend_lo;
925 op1 = plus_constant (op1, addend_hi);
926 addend = addend_lo;
928 tmp = gen_reg_rtx (Pmode);
929 emit_insn (gen_load_tprel (tmp, op1));
931 if (!register_operand (op0, Pmode))
932 op0 = gen_reg_rtx (Pmode);
933 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
934 break;
936 case TLS_MODEL_LOCAL_EXEC:
937 if (!register_operand (op0, Pmode))
938 op0 = gen_reg_rtx (Pmode);
940 op1 = orig_op1;
941 addend = 0;
942 if (TARGET_TLS64)
944 emit_insn (gen_load_tprel (op0, op1));
945 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
947 else
948 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
949 break;
951 default:
952 gcc_unreachable ();
955 if (addend)
956 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
957 orig_op0, 1, OPTAB_DIRECT);
958 if (orig_op0 == op0)
959 return NULL_RTX;
960 if (GET_MODE (orig_op0) == Pmode)
961 return op0;
962 return gen_lowpart (GET_MODE (orig_op0), op0);
966 ia64_expand_move (rtx op0, rtx op1)
968 enum machine_mode mode = GET_MODE (op0);
970 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
971 op1 = force_reg (mode, op1);
973 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
975 HOST_WIDE_INT addend = 0;
976 enum tls_model tls_kind;
977 rtx sym = op1;
979 if (GET_CODE (op1) == CONST
980 && GET_CODE (XEXP (op1, 0)) == PLUS
981 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
983 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
984 sym = XEXP (XEXP (op1, 0), 0);
987 tls_kind = tls_symbolic_operand_type (sym);
988 if (tls_kind)
989 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
991 if (any_offset_symbol_operand (sym, mode))
992 addend = 0;
993 else if (aligned_offset_symbol_operand (sym, mode))
995 HOST_WIDE_INT addend_lo, addend_hi;
997 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
998 addend_hi = addend - addend_lo;
1000 if (addend_lo != 0)
1002 op1 = plus_constant (sym, addend_hi);
1003 addend = addend_lo;
1005 else
1006 addend = 0;
1008 else
1009 op1 = sym;
1011 if (reload_completed)
1013 /* We really should have taken care of this offset earlier. */
1014 gcc_assert (addend == 0);
1015 if (ia64_expand_load_address (op0, op1))
1016 return NULL_RTX;
1019 if (addend)
1021 rtx subtarget = no_new_pseudos ? op0 : gen_reg_rtx (mode);
1023 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1025 op1 = expand_simple_binop (mode, PLUS, subtarget,
1026 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1027 if (op0 == op1)
1028 return NULL_RTX;
1032 return op1;
1035 /* Split a move from OP1 to OP0 conditional on COND. */
1037 void
1038 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1040 rtx insn, first = get_last_insn ();
1042 emit_move_insn (op0, op1);
1044 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1045 if (INSN_P (insn))
1046 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1047 PATTERN (insn));
1050 /* Split a post-reload TImode or TFmode reference into two DImode
1051 components. This is made extra difficult by the fact that we do
1052 not get any scratch registers to work with, because reload cannot
1053 be prevented from giving us a scratch that overlaps the register
1054 pair involved. So instead, when addressing memory, we tweak the
1055 pointer register up and back down with POST_INCs. Or up and not
1056 back down when we can get away with it.
1058 REVERSED is true when the loads must be done in reversed order
1059 (high word first) for correctness. DEAD is true when the pointer
1060 dies with the second insn we generate and therefore the second
1061 address must not carry a postmodify.
1063 May return an insn which is to be emitted after the moves. */
1065 static rtx
1066 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1068 rtx fixup = 0;
1070 switch (GET_CODE (in))
1072 case REG:
1073 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1074 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1075 break;
1077 case CONST_INT:
1078 case CONST_DOUBLE:
1079 /* Cannot occur reversed. */
1080 gcc_assert (!reversed);
1082 if (GET_MODE (in) != TFmode)
1083 split_double (in, &out[0], &out[1]);
1084 else
1085 /* split_double does not understand how to split a TFmode
1086 quantity into a pair of DImode constants. */
1088 REAL_VALUE_TYPE r;
1089 unsigned HOST_WIDE_INT p[2];
1090 long l[4]; /* TFmode is 128 bits */
1092 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1093 real_to_target (l, &r, TFmode);
1095 if (FLOAT_WORDS_BIG_ENDIAN)
1097 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1098 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1100 else
1102 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1103 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1105 out[0] = GEN_INT (p[0]);
1106 out[1] = GEN_INT (p[1]);
1108 break;
1110 case MEM:
1112 rtx base = XEXP (in, 0);
1113 rtx offset;
1115 switch (GET_CODE (base))
1117 case REG:
1118 if (!reversed)
1120 out[0] = adjust_automodify_address
1121 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1122 out[1] = adjust_automodify_address
1123 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1125 else
1127 /* Reversal requires a pre-increment, which can only
1128 be done as a separate insn. */
1129 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1130 out[0] = adjust_automodify_address
1131 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1132 out[1] = adjust_address (in, DImode, 0);
1134 break;
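	  /* So a (mem (reg rN)) source, for example, becomes one DImode
	     load through (post_inc rN) and a second through (post_dec rN),
	     leaving rN unchanged; when rN is dead after the second load,
	     the post_dec is dropped and the second load simply uses the
	     already-incremented rN.  */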
1136 case POST_INC:
1137 gcc_assert (!reversed && !dead);
1139 /* Just do the increment in two steps. */
1140 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1141 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1142 break;
1144 case POST_DEC:
1145 gcc_assert (!reversed && !dead);
1147 /* Add 8, subtract 24. */
1148 base = XEXP (base, 0);
1149 out[0] = adjust_automodify_address
1150 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1151 out[1] = adjust_automodify_address
1152 (in, DImode,
1153 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1155 break;
1157 case POST_MODIFY:
1158 gcc_assert (!reversed && !dead);
1160 /* Extract and adjust the modification. This case is
1161 trickier than the others, because we might have an
1162 index register, or we might have a combined offset that
1163 doesn't fit a signed 9-bit displacement field. We can
1164 assume the incoming expression is already legitimate. */
1165 offset = XEXP (base, 1);
1166 base = XEXP (base, 0);
1168 out[0] = adjust_automodify_address
1169 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1171 if (GET_CODE (XEXP (offset, 1)) == REG)
1173 /* Can't adjust the postmodify to match. Emit the
1174 original, then a separate addition insn. */
1175 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1176 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1178 else
1180 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1181 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1183 /* Again the postmodify cannot be made to match,
1184 but in this case it's more efficient to get rid
1185 of the postmodify entirely and fix up with an
1186 add insn. */
1187 out[1] = adjust_automodify_address (in, DImode, base, 8);
1188 fixup = gen_adddi3
1189 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1191 else
1193 /* Combined offset still fits in the displacement field.
1194 (We cannot overflow it at the high end.) */
1195 out[1] = adjust_automodify_address
1196 (in, DImode, gen_rtx_POST_MODIFY
1197 (Pmode, base, gen_rtx_PLUS
1198 (Pmode, base,
1199 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1203 break;
1205 default:
1206 gcc_unreachable ();
1208 break;
1211 default:
1212 gcc_unreachable ();
1215 return fixup;
1218 /* Split a TImode or TFmode move instruction after reload.
1219 This is used by *movtf_internal and *movti_internal. */
1220 void
1221 ia64_split_tmode_move (rtx operands[])
1223 rtx in[2], out[2], insn;
1224 rtx fixup[2];
1225 bool dead = false;
1226 bool reversed = false;
1228 /* It is possible for reload to decide to overwrite a pointer with
1229 the value it points to. In that case we have to do the loads in
1230 the appropriate order so that the pointer is not destroyed too
1231 early. Also we must not generate a postmodify for that second
1232 load, or rws_access_regno will die. */
1233 if (GET_CODE (operands[1]) == MEM
1234 && reg_overlap_mentioned_p (operands[0], operands[1]))
1236 rtx base = XEXP (operands[1], 0);
1237 while (GET_CODE (base) != REG)
1238 base = XEXP (base, 0);
1240 if (REGNO (base) == REGNO (operands[0]))
1241 reversed = true;
1242 dead = true;
1244 /* Another reason to do the moves in reversed order is if the first
1245 element of the target register pair is also the second element of
1246 the source register pair. */
1247 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1248 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1249 reversed = true;
1251 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1252 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1254 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1255 if (GET_CODE (EXP) == MEM \
1256 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1257 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1258 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1259 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1260 XEXP (XEXP (EXP, 0), 0), \
1261 REG_NOTES (INSN))
1263 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1264 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1265 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1267 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1268 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1269 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1271 if (fixup[0])
1272 emit_insn (fixup[0]);
1273 if (fixup[1])
1274 emit_insn (fixup[1]);
1276 #undef MAYBE_ADD_REG_INC_NOTE
1279 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1280 through memory plus an extra GR scratch register. Except that you can
1281 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1282 SECONDARY_RELOAD_CLASS, but not both.
1284 We got into problems in the first place by allowing a construct like
1285 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1286 This solution attempts to prevent this situation from occurring. When
1287 we see something like the above, we spill the inner register to memory. */
1289 static rtx
1290 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1292 if (GET_CODE (in) == SUBREG
1293 && GET_MODE (SUBREG_REG (in)) == TImode
1294 && GET_CODE (SUBREG_REG (in)) == REG)
1296 rtx memt = assign_stack_temp (TImode, 16, 0);
1297 emit_move_insn (memt, SUBREG_REG (in));
1298 return adjust_address (memt, mode, 0);
1300 else if (force && GET_CODE (in) == REG)
1302 rtx memx = assign_stack_temp (mode, 16, 0);
1303 emit_move_insn (memx, in);
1304 return memx;
1306 else
1307 return in;
1310 /* Expand the movxf or movrf pattern (MODE says which) with the given
1311 OPERANDS, returning true if the pattern should then invoke
1312 DONE. */
1314 bool
1315 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1317 rtx op0 = operands[0];
1319 if (GET_CODE (op0) == SUBREG)
1320 op0 = SUBREG_REG (op0);
1322 /* We must support XFmode loads into general registers for stdarg/vararg,
1323 unprototyped calls, and a rare case where a long double is passed as
1324 an argument after a float HFA fills the FP registers. We split them into
1325 DImode loads for convenience. We also need to support XFmode stores
1326 for the last case. This case does not happen for stdarg/vararg routines,
1327 because we do a block store to memory of unnamed arguments. */
1329 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1331 rtx out[2];
1333 /* We're hoping to transform everything that deals with XFmode
1334 quantities and GR registers early in the compiler. */
1335 gcc_assert (!no_new_pseudos);
1337 /* Struct to register can just use TImode instead. */
1338 if ((GET_CODE (operands[1]) == SUBREG
1339 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1340 || (GET_CODE (operands[1]) == REG
1341 && GR_REGNO_P (REGNO (operands[1]))))
1343 rtx op1 = operands[1];
1345 if (GET_CODE (op1) == SUBREG)
1346 op1 = SUBREG_REG (op1);
1347 else
1348 op1 = gen_rtx_REG (TImode, REGNO (op1));
1350 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1351 return true;
1354 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1356 /* Don't word-swap when reading in the constant. */
1357 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1358 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1359 0, mode));
1360 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1361 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1362 0, mode));
1363 return true;
1366 /* If the quantity is in a register not known to be GR, spill it. */
1367 if (register_operand (operands[1], mode))
1368 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1370 gcc_assert (GET_CODE (operands[1]) == MEM);
1372 /* Don't word-swap when reading in the value. */
1373 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1374 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1376 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1377 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1378 return true;
1381 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1383 /* We're hoping to transform everything that deals with XFmode
1384 quantities and GR registers early in the compiler. */
1385 gcc_assert (!no_new_pseudos);
1387 /* Op0 can't be a GR_REG here, as that case is handled above.
1388 If op0 is a register, then we spill op1, so that we now have a
1389 MEM operand. This requires creating an XFmode subreg of a TImode reg
1390 to force the spill. */
1391 if (register_operand (operands[0], mode))
1393 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1394 op1 = gen_rtx_SUBREG (mode, op1, 0);
1395 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1398 else
1400 rtx in[2];
1402 gcc_assert (GET_CODE (operands[0]) == MEM);
1404 /* Don't word-swap when writing out the value. */
1405 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1406 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1408 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1409 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1410 return true;
1414 if (!reload_in_progress && !reload_completed)
1416 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1418 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1420 rtx memt, memx, in = operands[1];
1421 if (CONSTANT_P (in))
1422 in = validize_mem (force_const_mem (mode, in));
1423 if (GET_CODE (in) == MEM)
1424 memt = adjust_address (in, TImode, 0);
1425 else
1427 memt = assign_stack_temp (TImode, 16, 0);
1428 memx = adjust_address (memt, mode, 0);
1429 emit_move_insn (memx, in);
1431 emit_move_insn (op0, memt);
1432 return true;
1435 if (!ia64_move_ok (operands[0], operands[1]))
1436 operands[1] = force_reg (mode, operands[1]);
1439 return false;
1442 /* Emit comparison instruction if necessary, returning the expression
1443 that holds the compare result in the proper mode. */
1445 static GTY(()) rtx cmptf_libfunc;
1448 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1450 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1451 rtx cmp;
1453 /* If we have a BImode input, then we already have a compare result, and
1454 do not need to emit another comparison. */
1455 if (GET_MODE (op0) == BImode)
1457 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1458 cmp = op0;
1460 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1461 magic number as its third argument that indicates what to do.
1462 The return value is an integer to be compared against zero.  */
1463 else if (GET_MODE (op0) == TFmode)
1465 enum qfcmp_magic {
1466 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1467 QCMP_UNORD = 2,
1468 QCMP_EQ = 4,
1469 QCMP_LT = 8,
1470 QCMP_GT = 16
1471 } magic;
1472 enum rtx_code ncode;
1473 rtx ret, insns;
1475 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1476 switch (code)
1478 /* 1 = equal, 0 = not equal. Equality operators do
1479 not raise FP_INVALID when given an SNaN operand. */
1480 case EQ: magic = QCMP_EQ; ncode = NE; break;
1481 case NE: magic = QCMP_EQ; ncode = EQ; break;
1482 /* isunordered() from C99. */
1483 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1484 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1485 /* Relational operators raise FP_INVALID when given
1486 an SNaN operand. */
1487 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1488 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1489 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1490 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1491 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1492 Expanders for buneq etc. would have to be added to ia64.md
1493 for this to be useful. */
1494 default: gcc_unreachable ();
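	  /* For example, a LE comparison becomes
	     _U_Qfcmp (op0, op1, QCMP_LT | QCMP_EQ | QCMP_INV), i.e. magic
	     number 13, and the DImode result is then tested with NE
	     against zero.  */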
1497 start_sequence ();
1499 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1500 op0, TFmode, op1, TFmode,
1501 GEN_INT (magic), DImode);
1502 cmp = gen_reg_rtx (BImode);
1503 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1504 gen_rtx_fmt_ee (ncode, BImode,
1505 ret, const0_rtx)));
1507 insns = get_insns ();
1508 end_sequence ();
1510 emit_libcall_block (insns, cmp, cmp,
1511 gen_rtx_fmt_ee (code, BImode, op0, op1));
1512 code = NE;
1514 else
1516 cmp = gen_reg_rtx (BImode);
1517 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1518 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1519 code = NE;
1522 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1525 /* Generate an integral vector comparison. Return true if the condition has
1526 been reversed, and so the sense of the comparison should be inverted. */
1528 static bool
1529 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1530 rtx dest, rtx op0, rtx op1)
1532 bool negate = false;
1533 rtx x;
1535 /* Canonicalize the comparison to EQ, GT, GTU. */
1536 switch (code)
1538 case EQ:
1539 case GT:
1540 case GTU:
1541 break;
1543 case NE:
1544 case LE:
1545 case LEU:
1546 code = reverse_condition (code);
1547 negate = true;
1548 break;
1550 case GE:
1551 case GEU:
1552 code = reverse_condition (code);
1553 negate = true;
1554 /* FALLTHRU */
1556 case LT:
1557 case LTU:
1558 code = swap_condition (code);
1559 x = op0, op0 = op1, op1 = x;
1560 break;
1562 default:
1563 gcc_unreachable ();
1566 /* Unsigned parallel compare is not supported by the hardware. Play some
1567 tricks to turn this into a signed comparison against 0. */
1568 if (code == GTU)
1570 switch (mode)
1572 case V2SImode:
1574 rtx t1, t2, mask;
1576 /* Perform a parallel modulo subtraction. */
1577 t1 = gen_reg_rtx (V2SImode);
1578 emit_insn (gen_subv2si3 (t1, op0, op1));
1580 /* Extract the original sign bit of op0. */
1581 mask = GEN_INT (-0x80000000);
1582 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1583 mask = force_reg (V2SImode, mask);
1584 t2 = gen_reg_rtx (V2SImode);
1585 emit_insn (gen_andv2si3 (t2, op0, mask));
1587 /* XOR it back into the result of the subtraction. This results
1588 in the sign bit set iff we saw unsigned underflow. */
1589 x = gen_reg_rtx (V2SImode);
1590 emit_insn (gen_xorv2si3 (x, t1, t2));
1592 code = GT;
1593 op0 = x;
1594 op1 = CONST0_RTX (mode);
1596 break;
1598 case V8QImode:
1599 case V4HImode:
1600 /* Perform a parallel unsigned saturating subtraction. */
1601 x = gen_reg_rtx (mode);
1602 emit_insn (gen_rtx_SET (VOIDmode, x,
1603 gen_rtx_US_MINUS (mode, op0, op1)));
1605 code = EQ;
1606 op0 = x;
1607 op1 = CONST0_RTX (mode);
1608 negate = !negate;
1609 break;
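	  /* This works because the unsigned saturating difference
	     op0 -us op1 is non-zero exactly when op0 >u op1, so the GTU
	     test becomes an EQ-against-zero test with the sense of the
	     comparison flipped via NEGATE.  */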
1611 default:
1612 gcc_unreachable ();
1616 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1617 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1619 return negate;
1622 /* Emit an integral vector conditional move. */
1624 void
1625 ia64_expand_vecint_cmov (rtx operands[])
1627 enum machine_mode mode = GET_MODE (operands[0]);
1628 enum rtx_code code = GET_CODE (operands[3]);
1629 bool negate;
1630 rtx cmp, x, ot, of;
1632 cmp = gen_reg_rtx (mode);
1633 negate = ia64_expand_vecint_compare (code, mode, cmp,
1634 operands[4], operands[5]);
1636 ot = operands[1+negate];
1637 of = operands[2-negate];
1639 if (ot == CONST0_RTX (mode))
1641 if (of == CONST0_RTX (mode))
1643 emit_move_insn (operands[0], ot);
1644 return;
1647 x = gen_rtx_NOT (mode, cmp);
1648 x = gen_rtx_AND (mode, x, of);
1649 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1651 else if (of == CONST0_RTX (mode))
1653 x = gen_rtx_AND (mode, cmp, ot);
1654 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1656 else
1658 rtx t, f;
1660 t = gen_reg_rtx (mode);
1661 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1662 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1664 f = gen_reg_rtx (mode);
1665 x = gen_rtx_NOT (mode, cmp);
1666 x = gen_rtx_AND (mode, x, operands[2-negate]);
1667 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1669 x = gen_rtx_IOR (mode, t, f);
1670 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1674 /* Emit an integral vector min or max operation. Return true if all done. */
1676 bool
1677 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1678 rtx operands[])
1680 rtx xops[6];
1682 /* These four combinations are supported directly. */
1683 if (mode == V8QImode && (code == UMIN || code == UMAX))
1684 return false;
1685 if (mode == V4HImode && (code == SMIN || code == SMAX))
1686 return false;
1688 /* This combination can be implemented with only saturating subtraction. */
1689 if (mode == V4HImode && code == UMAX)
1691 rtx x, tmp = gen_reg_rtx (mode);
1693 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1694 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1696 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1697 return true;
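      /* This relies on the identity umax (a, b) = (a -us b) + b: the
	 unsigned saturating difference is a - b when a >u b and 0
	 otherwise.  */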
1700 /* Everything else implemented via vector comparisons. */
1701 xops[0] = operands[0];
1702 xops[4] = xops[1] = operands[1];
1703 xops[5] = xops[2] = operands[2];
1705 switch (code)
1707 case UMIN:
1708 code = LTU;
1709 break;
1710 case UMAX:
1711 code = GTU;
1712 break;
1713 case SMIN:
1714 code = LT;
1715 break;
1716 case SMAX:
1717 code = GT;
1718 break;
1719 default:
1720 gcc_unreachable ();
1722 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1724 ia64_expand_vecint_cmov (xops);
1725 return true;
1728 /* Emit an integral vector widening sum operation.  */
1730 void
1731 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1733 rtx l, h, x, s;
1734 enum machine_mode wmode, mode;
1735 rtx (*unpack_l) (rtx, rtx, rtx);
1736 rtx (*unpack_h) (rtx, rtx, rtx);
1737 rtx (*plus) (rtx, rtx, rtx);
1739 wmode = GET_MODE (operands[0]);
1740 mode = GET_MODE (operands[1]);
1742 switch (mode)
1744 case V8QImode:
1745 unpack_l = gen_unpack1_l;
1746 unpack_h = gen_unpack1_h;
1747 plus = gen_addv4hi3;
1748 break;
1749 case V4HImode:
1750 unpack_l = gen_unpack2_l;
1751 unpack_h = gen_unpack2_h;
1752 plus = gen_addv2si3;
1753 break;
1754 default:
1755 gcc_unreachable ();
1758 /* Fill in x with the sign extension of each element in op1. */
1759 if (unsignedp)
1760 x = CONST0_RTX (mode);
1761 else
1763 bool neg;
1765 x = gen_reg_rtx (mode);
1767 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1768 CONST0_RTX (mode));
1769 gcc_assert (!neg);
1772 l = gen_reg_rtx (wmode);
1773 h = gen_reg_rtx (wmode);
1774 s = gen_reg_rtx (wmode);
1776 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1777 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1778 emit_insn (plus (s, l, operands[2]));
1779 emit_insn (plus (operands[0], h, s));
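/* X supplies the high half of each widened element: all zeros for the
   unsigned case, or the all-ones/all-zeros mask produced by the LT
   comparison for the signed case.  The unpack patterns interleave
   OPERANDS[1] with X to form the widened low and high halves, which are
   then accumulated together with OPERANDS[2] into OPERANDS[0].  */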
1782 /* Emit a signed or unsigned V8QI dot product operation. */
1784 void
1785 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1787 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1789 /* Fill in x1 and x2 with the sign extension of each element. */
1790 if (unsignedp)
1791 x1 = x2 = CONST0_RTX (V8QImode);
1792 else
1794 bool neg;
1796 x1 = gen_reg_rtx (V8QImode);
1797 x2 = gen_reg_rtx (V8QImode);
1799 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1800 CONST0_RTX (V8QImode));
1801 gcc_assert (!neg);
1802 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1803 CONST0_RTX (V8QImode));
1804 gcc_assert (!neg);
1807 l1 = gen_reg_rtx (V4HImode);
1808 l2 = gen_reg_rtx (V4HImode);
1809 h1 = gen_reg_rtx (V4HImode);
1810 h2 = gen_reg_rtx (V4HImode);
1812 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1813 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1814 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1815 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1817 p1 = gen_reg_rtx (V2SImode);
1818 p2 = gen_reg_rtx (V2SImode);
1819 p3 = gen_reg_rtx (V2SImode);
1820 p4 = gen_reg_rtx (V2SImode);
1821 emit_insn (gen_pmpy2_r (p1, l1, l2));
1822 emit_insn (gen_pmpy2_l (p2, l1, l2));
1823 emit_insn (gen_pmpy2_r (p3, h1, h2));
1824 emit_insn (gen_pmpy2_l (p4, h1, h2));
1826 s1 = gen_reg_rtx (V2SImode);
1827 s2 = gen_reg_rtx (V2SImode);
1828 s3 = gen_reg_rtx (V2SImode);
1829 emit_insn (gen_addv2si3 (s1, p1, p2));
1830 emit_insn (gen_addv2si3 (s2, p3, p4));
1831 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1832 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1835 /* Emit the appropriate sequence for a call. */
1837 void
1838 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1839 int sibcall_p)
1841 rtx insn, b0;
1843 addr = XEXP (addr, 0);
1844 addr = convert_memory_address (DImode, addr);
1845 b0 = gen_rtx_REG (DImode, R_BR (0));
1847 /* ??? Should do this for functions known to bind local too. */
1848 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1850 if (sibcall_p)
1851 insn = gen_sibcall_nogp (addr);
1852 else if (! retval)
1853 insn = gen_call_nogp (addr, b0);
1854 else
1855 insn = gen_call_value_nogp (retval, addr, b0);
1856 insn = emit_call_insn (insn);
1858 else
1860 if (sibcall_p)
1861 insn = gen_sibcall_gp (addr);
1862 else if (! retval)
1863 insn = gen_call_gp (addr, b0);
1864 else
1865 insn = gen_call_value_gp (retval, addr, b0);
1866 insn = emit_call_insn (insn);
1868 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1871 if (sibcall_p)
1872 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1875 void
1876 ia64_reload_gp (void)
1878 rtx tmp;
1880 if (current_frame_info.reg_save_gp)
1881 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1882 else
1884 HOST_WIDE_INT offset;
1885 rtx offset_r;
1887 offset = (current_frame_info.spill_cfa_off
1888 + current_frame_info.spill_size);
1889 if (frame_pointer_needed)
1891 tmp = hard_frame_pointer_rtx;
1892 offset = -offset;
1894 else
1896 tmp = stack_pointer_rtx;
1897 offset = current_frame_info.total_size - offset;
1900 offset_r = GEN_INT (offset);
1901 if (satisfies_constraint_I (offset_r))
1902 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
1903 else
1905 emit_move_insn (pic_offset_table_rtx, offset_r);
1906 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1907 pic_offset_table_rtx, tmp));
1910 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1913 emit_move_insn (pic_offset_table_rtx, tmp);
1916 void
1917 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1918 rtx scratch_b, int noreturn_p, int sibcall_p)
1920 rtx insn;
1921 bool is_desc = false;
1923 /* If we find we're calling through a register, then we're actually
1924 calling through a descriptor, so load up the values. */
1925 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1927 rtx tmp;
1928 bool addr_dead_p;
1930 /* ??? We are currently constrained to *not* use peep2, because
1931 we can legitimately change the global lifetime of the GP
1932 (in the form of killing where previously live). This is
1933 because a call through a descriptor doesn't use the previous
1934 value of the GP, while a direct call does, and we do not
1935 commit to either form until the split here.
1937 That said, this means that we lack precise life info for
1938 whether ADDR is dead after this call. This is not terribly
1939 important, since we can fix things up essentially for free
1940 with the POST_DEC below, but it's nice to not use it when we
1941 can immediately tell it's not necessary. */
1942 addr_dead_p = ((noreturn_p || sibcall_p
1943 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1944 REGNO (addr)))
1945 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1947 /* Load the code address into scratch_b. */
1948 tmp = gen_rtx_POST_INC (Pmode, addr);
1949 tmp = gen_rtx_MEM (Pmode, tmp);
1950 emit_move_insn (scratch_r, tmp);
1951 emit_move_insn (scratch_b, scratch_r);
1953 /* Load the GP address. If ADDR is not dead here, then we must
1954 revert the change made above via the POST_INCREMENT. */
1955 if (!addr_dead_p)
1956 tmp = gen_rtx_POST_DEC (Pmode, addr);
1957 else
1958 tmp = addr;
1959 tmp = gen_rtx_MEM (Pmode, tmp);
1960 emit_move_insn (pic_offset_table_rtx, tmp);
1962 is_desc = true;
1963 addr = scratch_b;
1966 if (sibcall_p)
1967 insn = gen_sibcall_nogp (addr);
1968 else if (retval)
1969 insn = gen_call_value_nogp (retval, addr, retaddr);
1970 else
1971 insn = gen_call_nogp (addr, retaddr);
1972 emit_call_insn (insn);
1974 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1975 ia64_reload_gp ();
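/* For reference, the function descriptor loaded above consists of two
   64-bit words (a sketch; this struct is illustrative and not a type used
   anywhere in the compiler):

     struct ia64_fdesc_sketch
     {
       unsigned long entry;   code address, moved into SCRATCH_B above
       unsigned long gp;      callee's gp, loaded into pic_offset_table_rtx
     };

   A direct call keeps the caller's gp, while a call through a descriptor
   installs the callee's gp, which is why ia64_reload_gp may be needed
   after the call returns.  */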
1978 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
1980 This differs from the generic code in that we know about the zero-extending
1981 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
1982 also know that ld.acq+cmpxchg.rel equals a full barrier.
1984 The loop we want to generate looks like
1986 cmp_reg = mem;
1987 label:
1988 old_reg = cmp_reg;
1989 new_reg = cmp_reg op val;
1990 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
1991 if (cmp_reg != old_reg)
1992 goto label;
1994 Note that we only do the plain load from memory once. Subsequent
1995 iterations use the value loaded by the compare-and-swap pattern. */
1997 void
1998 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
1999 rtx old_dst, rtx new_dst)
2001 enum machine_mode mode = GET_MODE (mem);
2002 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2003 enum insn_code icode;
2005 /* Special case for using fetchadd. */
2006 if ((mode == SImode || mode == DImode)
2007 && (code == PLUS || code == MINUS)
2008 && fetchadd_operand (val, mode))
2010 if (code == MINUS)
2011 val = GEN_INT (-INTVAL (val));
2013 if (!old_dst)
2014 old_dst = gen_reg_rtx (mode);
2016 emit_insn (gen_memory_barrier ());
2018 if (mode == SImode)
2019 icode = CODE_FOR_fetchadd_acq_si;
2020 else
2021 icode = CODE_FOR_fetchadd_acq_di;
2022 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2024 if (new_dst)
2026 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2027 true, OPTAB_WIDEN);
2028 if (new_reg != new_dst)
2029 emit_move_insn (new_dst, new_reg);
2031 return;
2034 /* Because of the volatile mem read, we get an ld.acq, which is the
2035 front half of the full barrier. The end half is the cmpxchg.rel. */
2036 gcc_assert (MEM_VOLATILE_P (mem));
2038 old_reg = gen_reg_rtx (DImode);
2039 cmp_reg = gen_reg_rtx (DImode);
2040 label = gen_label_rtx ();
2042 if (mode != DImode)
2044 val = simplify_gen_subreg (DImode, val, mode, 0);
2045 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2047 else
2048 emit_move_insn (cmp_reg, mem);
2050 emit_label (label);
2052 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2053 emit_move_insn (old_reg, cmp_reg);
2054 emit_move_insn (ar_ccv, cmp_reg);
2056 if (old_dst)
2057 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2059 new_reg = cmp_reg;
2060 if (code == NOT)
2062 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2063 code = AND;
2065 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2066 true, OPTAB_DIRECT);
2068 if (mode != DImode)
2069 new_reg = gen_lowpart (mode, new_reg);
2070 if (new_dst)
2071 emit_move_insn (new_dst, new_reg);
2073 switch (mode)
2075 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2076 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2077 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2078 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2079 default:
2080 gcc_unreachable ();
2083 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2085 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
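/* At the source level, this routine is what backs the __sync family of
   builtins for this target.  A usage sketch (the variable names are
   illustrative):

     static int counter;
     ...
     int old = __sync_fetch_and_add (&counter, 1);

   Here the addend 1 is accepted by fetchadd_operand, so the expansion
   takes the fetchadd fast path above; other operation codes, or addends
   that fetchadd cannot encode, fall back to the ld.acq / cmpxchg.rel
   loop.  */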
2088 /* Begin the assembly file. */
2090 static void
2091 ia64_file_start (void)
2093 /* Variable tracking should be run after all optimizations which change order
2094 of insns. It also needs a valid CFG. This can't be done in
2095 ia64_override_options, because flag_var_tracking is finalized after
2096 that. */
2097 ia64_flag_var_tracking = flag_var_tracking;
2098 flag_var_tracking = 0;
2100 default_file_start ();
2101 emit_safe_across_calls ();
2104 void
2105 emit_safe_across_calls (void)
2107 unsigned int rs, re;
2108 int out_state;
2110 rs = 1;
2111 out_state = 0;
2112 while (1)
2114 while (rs < 64 && call_used_regs[PR_REG (rs)])
2115 rs++;
2116 if (rs >= 64)
2117 break;
2118 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2119 continue;
2120 if (out_state == 0)
2122 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2123 out_state = 1;
2125 else
2126 fputc (',', asm_out_file);
2127 if (re == rs + 1)
2128 fprintf (asm_out_file, "p%u", rs);
2129 else
2130 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2131 rs = re + 1;
2133 if (out_state)
2134 fputc ('\n', asm_out_file);
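/* With the default IA-64 register conventions, where p1-p5 and p16-p63
   are the call-saved predicates, the loop above produces a single
   directive of the form (illustrative output):

     .pred.safe_across_calls p1-p5,p16-p63  */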
2137 /* Globalize a declaration. */
2139 static void
2140 ia64_globalize_decl_name (FILE * stream, tree decl)
2142 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2143 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2144 if (version_attr)
2146 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2147 const char *p = TREE_STRING_POINTER (v);
2148 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2150 targetm.asm_out.globalize_label (stream, name);
2151 if (TREE_CODE (decl) == FUNCTION_DECL)
2152 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2155 /* Helper function for ia64_compute_frame_size: find an appropriate general
2156 register to spill some special register to.  The GR0 to GR31 slots already
2157 allocated by this routine are recorded in current_frame_info.gr_used_mask.
2158 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2160 static int
2161 find_gr_spill (int try_locals)
2163 int regno;
2165 /* If this is a leaf function, first try an otherwise unused
2166 call-clobbered register. */
2167 if (current_function_is_leaf)
2169 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2170 if (! regs_ever_live[regno]
2171 && call_used_regs[regno]
2172 && ! fixed_regs[regno]
2173 && ! global_regs[regno]
2174 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2176 current_frame_info.gr_used_mask |= 1 << regno;
2177 return regno;
2181 if (try_locals)
2183 regno = current_frame_info.n_local_regs;
2184 /* If there is a frame pointer, then we can't use loc79, because
2185 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2186 reg_name switching code in ia64_expand_prologue. */
2187 if (regno < (80 - frame_pointer_needed))
2189 current_frame_info.n_local_regs = regno + 1;
2190 return LOC_REG (0) + regno;
2194 /* Failed to find a general register to spill to. Must use stack. */
2195 return 0;
2198 /* In order to make for nice schedules, we try to allocate every temporary
2199 to a different register. We must of course stay away from call-saved,
2200 fixed, and global registers. We must also stay away from registers
2201 allocated in current_frame_info.gr_used_mask, since those include regs
2202 used all through the prologue.
2204 Any register allocated here must be used immediately. The idea is to
2205 aid scheduling, not to solve data flow problems. */
2207 static int last_scratch_gr_reg;
2209 static int
2210 next_scratch_gr_reg (void)
2212 int i, regno;
2214 for (i = 0; i < 32; ++i)
2216 regno = (last_scratch_gr_reg + i + 1) & 31;
2217 if (call_used_regs[regno]
2218 && ! fixed_regs[regno]
2219 && ! global_regs[regno]
2220 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2222 last_scratch_gr_reg = regno;
2223 return regno;
2227 /* There must be _something_ available. */
2228 gcc_unreachable ();
2231 /* Helper function for ia64_compute_frame_size, called through
2232 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2234 static void
2235 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2237 unsigned int regno = REGNO (reg);
2238 if (regno < 32)
2240 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2241 for (i = 0; i < n; ++i)
2242 current_frame_info.gr_used_mask |= 1 << (regno + i);
2246 /* Compute the number of bytes offset between the frame pointer and the stack
2247 pointer for the current function, recording the layout in current_frame_info.
2248 SIZE is the number of bytes of space needed for local variables. */
2250 static void
2251 ia64_compute_frame_size (HOST_WIDE_INT size)
2253 HOST_WIDE_INT total_size;
2254 HOST_WIDE_INT spill_size = 0;
2255 HOST_WIDE_INT extra_spill_size = 0;
2256 HOST_WIDE_INT pretend_args_size;
2257 HARD_REG_SET mask;
2258 int n_spilled = 0;
2259 int spilled_gr_p = 0;
2260 int spilled_fr_p = 0;
2261 unsigned int regno;
2262 int i;
2264 if (current_frame_info.initialized)
2265 return;
2267 memset (&current_frame_info, 0, sizeof current_frame_info);
2268 CLEAR_HARD_REG_SET (mask);
2270 /* Don't allocate scratches to the return register. */
2271 diddle_return_value (mark_reg_gr_used_mask, NULL);
2273 /* Don't allocate scratches to the EH scratch registers. */
2274 if (cfun->machine->ia64_eh_epilogue_sp)
2275 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2276 if (cfun->machine->ia64_eh_epilogue_bsp)
2277 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2279 /* Find the size of the register stack frame. We have only 80 local
2280 registers, because we reserve 8 for the inputs and 8 for the
2281 outputs. */
2283 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2284 since we'll be adjusting that down later. */
2285 regno = LOC_REG (78) + ! frame_pointer_needed;
2286 for (; regno >= LOC_REG (0); regno--)
2287 if (regs_ever_live[regno])
2288 break;
2289 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2291 /* For functions marked with the syscall_linkage attribute, we must mark
2292 all eight input registers as in use, so that locals aren't visible to
2293 the caller. */
2295 if (cfun->machine->n_varargs > 0
2296 || lookup_attribute ("syscall_linkage",
2297 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2298 current_frame_info.n_input_regs = 8;
2299 else
2301 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2302 if (regs_ever_live[regno])
2303 break;
2304 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2307 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2308 if (regs_ever_live[regno])
2309 break;
2310 i = regno - OUT_REG (0) + 1;
2312 #ifndef PROFILE_HOOK
2313 /* When -p profiling, we need one output register for the mcount argument.
2314 Likewise for -a profiling for the bb_init_func argument. For -ax
2315 profiling, we need two output registers for the two bb_init_trace_func
2316 arguments. */
2317 if (current_function_profile)
2318 i = MAX (i, 1);
2319 #endif
2320 current_frame_info.n_output_regs = i;
2322 /* ??? No rotating register support yet. */
2323 current_frame_info.n_rotate_regs = 0;
2325 /* Discover which registers need spilling, and how much room that
2326 will take. Begin with floating point and general registers,
2327 which will always wind up on the stack. */
2329 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2330 if (regs_ever_live[regno] && ! call_used_regs[regno])
2332 SET_HARD_REG_BIT (mask, regno);
2333 spill_size += 16;
2334 n_spilled += 1;
2335 spilled_fr_p = 1;
2338 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2339 if (regs_ever_live[regno] && ! call_used_regs[regno])
2341 SET_HARD_REG_BIT (mask, regno);
2342 spill_size += 8;
2343 n_spilled += 1;
2344 spilled_gr_p = 1;
2347 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2348 if (regs_ever_live[regno] && ! call_used_regs[regno])
2350 SET_HARD_REG_BIT (mask, regno);
2351 spill_size += 8;
2352 n_spilled += 1;
2355 /* Now come all special registers that might get saved in other
2356 general registers. */
2358 if (frame_pointer_needed)
2360 current_frame_info.reg_fp = find_gr_spill (1);
2361 /* If we did not get a register, then we take LOC79. This is guaranteed
2362 to be free, even if regs_ever_live is already set, because this is
2363 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2364 as we don't count loc79 above. */
2365 if (current_frame_info.reg_fp == 0)
2367 current_frame_info.reg_fp = LOC_REG (79);
2368 current_frame_info.n_local_regs++;
2372 if (! current_function_is_leaf)
2374 /* Emit a save of BR0 if we call other functions. Do this even
2375 if this function doesn't return, as EH depends on this to be
2376 able to unwind the stack. */
2377 SET_HARD_REG_BIT (mask, BR_REG (0));
2379 current_frame_info.reg_save_b0 = find_gr_spill (1);
2380 if (current_frame_info.reg_save_b0 == 0)
2382 extra_spill_size += 8;
2383 n_spilled += 1;
2386 /* Similarly for ar.pfs. */
2387 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2388 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2389 if (current_frame_info.reg_save_ar_pfs == 0)
2391 extra_spill_size += 8;
2392 n_spilled += 1;
2395 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2396 registers are clobbered, so we fall back to the stack. */
2397 current_frame_info.reg_save_gp
2398 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2399 if (current_frame_info.reg_save_gp == 0)
2401 SET_HARD_REG_BIT (mask, GR_REG (1));
2402 spill_size += 8;
2403 n_spilled += 1;
2406 else
2408 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2410 SET_HARD_REG_BIT (mask, BR_REG (0));
2411 extra_spill_size += 8;
2412 n_spilled += 1;
2415 if (regs_ever_live[AR_PFS_REGNUM])
2417 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2418 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2419 if (current_frame_info.reg_save_ar_pfs == 0)
2421 extra_spill_size += 8;
2422 n_spilled += 1;
2427 /* Unwind descriptor hackery: things are most efficient if we allocate
2428 consecutive GR save registers for RP, PFS, FP in that order. However,
2429 it is absolutely critical that FP get the only hard register that's
2430 guaranteed to be free, so we allocated it first. If all three did
2431 happen to be allocated hard regs, and are consecutive, rearrange them
2432 into the preferred order now. */
2433 if (current_frame_info.reg_fp != 0
2434 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2435 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2437 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2438 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2439 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2442 /* See if we need to store the predicate register block. */
2443 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2444 if (regs_ever_live[regno] && ! call_used_regs[regno])
2445 break;
2446 if (regno <= PR_REG (63))
2448 SET_HARD_REG_BIT (mask, PR_REG (0));
2449 current_frame_info.reg_save_pr = find_gr_spill (1);
2450 if (current_frame_info.reg_save_pr == 0)
2452 extra_spill_size += 8;
2453 n_spilled += 1;
2456 /* ??? Mark them all as used so that register renaming and such
2457 are free to use them. */
2458 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2459 regs_ever_live[regno] = 1;
2462 /* If we're forced to use st8.spill, we're forced to save and restore
2463 ar.unat as well. The check for existing liveness allows inline asm
2464 to touch ar.unat. */
2465 if (spilled_gr_p || cfun->machine->n_varargs
2466 || regs_ever_live[AR_UNAT_REGNUM])
2468 regs_ever_live[AR_UNAT_REGNUM] = 1;
2469 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2470 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2471 if (current_frame_info.reg_save_ar_unat == 0)
2473 extra_spill_size += 8;
2474 n_spilled += 1;
2478 if (regs_ever_live[AR_LC_REGNUM])
2480 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2481 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2482 if (current_frame_info.reg_save_ar_lc == 0)
2484 extra_spill_size += 8;
2485 n_spilled += 1;
2489 /* If we have an odd number of words of pretend arguments written to
2490 the stack, then the FR save area will be unaligned. We round the
2491 size of this area up to keep things 16 byte aligned. */
2492 if (spilled_fr_p)
2493 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2494 else
2495 pretend_args_size = current_function_pretend_args_size;
2497 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2498 + current_function_outgoing_args_size);
2499 total_size = IA64_STACK_ALIGN (total_size);
2501 /* We always use the 16-byte scratch area provided by the caller, but
2502 if we are a leaf function, there's no one to which we need to provide
2503 a scratch area. */
2504 if (current_function_is_leaf)
2505 total_size = MAX (0, total_size - 16);
2507 current_frame_info.total_size = total_size;
2508 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2509 current_frame_info.spill_size = spill_size;
2510 current_frame_info.extra_spill_size = extra_spill_size;
2511 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2512 current_frame_info.n_spilled = n_spilled;
2513 current_frame_info.initialized = reload_completed;
2516 /* Compute the initial difference between the specified pair of registers. */
2518 HOST_WIDE_INT
2519 ia64_initial_elimination_offset (int from, int to)
2521 HOST_WIDE_INT offset;
2523 ia64_compute_frame_size (get_frame_size ());
2524 switch (from)
2526 case FRAME_POINTER_REGNUM:
2527 switch (to)
2529 case HARD_FRAME_POINTER_REGNUM:
2530 if (current_function_is_leaf)
2531 offset = -current_frame_info.total_size;
2532 else
2533 offset = -(current_frame_info.total_size
2534 - current_function_outgoing_args_size - 16);
2535 break;
2537 case STACK_POINTER_REGNUM:
2538 if (current_function_is_leaf)
2539 offset = 0;
2540 else
2541 offset = 16 + current_function_outgoing_args_size;
2542 break;
2544 default:
2545 gcc_unreachable ();
2547 break;
2549 case ARG_POINTER_REGNUM:
2550 /* Arguments start above the 16 byte save area, unless stdarg,
2551 in which case we store through the 16 byte save area. */
2552 switch (to)
2554 case HARD_FRAME_POINTER_REGNUM:
2555 offset = 16 - current_function_pretend_args_size;
2556 break;
2558 case STACK_POINTER_REGNUM:
2559 offset = (current_frame_info.total_size
2560 + 16 - current_function_pretend_args_size);
2561 break;
2563 default:
2564 gcc_unreachable ();
2566 break;
2568 default:
2569 gcc_unreachable ();
2572 return offset;
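/* A worked example of the offsets computed above (the sizes are
   illustrative): for a non-leaf function with total_size == 160,
   current_function_outgoing_args_size == 32 and no pretend args,

     FRAME_POINTER -> HARD_FRAME_POINTER:  -(160 - 32 - 16) == -112
     FRAME_POINTER -> STACK_POINTER:       16 + 32          ==   48
     ARG_POINTER   -> HARD_FRAME_POINTER:  16 - 0           ==   16
     ARG_POINTER   -> STACK_POINTER:       160 + 16 - 0     ==  176  */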
2575 /* If there are more than a trivial number of register spills, we use
2576 two interleaved iterators so that we can get two memory references
2577 per insn group.
2579 In order to simplify things in the prologue and epilogue expanders,
2580 we use helper functions to fix up the memory references after the
2581 fact with the appropriate offsets to a POST_MODIFY memory mode.
2582 The following data structure tracks the state of the two iterators
2583 while insns are being emitted. */
2585 struct spill_fill_data
2587 rtx init_after; /* point at which to emit initializations */
2588 rtx init_reg[2]; /* initial base register */
2589 rtx iter_reg[2]; /* the iterator registers */
2590 rtx *prev_addr[2]; /* address of last memory use */
2591 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2592 HOST_WIDE_INT prev_off[2]; /* last offset */
2593 int n_iter; /* number of iterators in use */
2594 int next_iter; /* next iterator to use */
2595 unsigned int save_gr_used_mask;
2598 static struct spill_fill_data spill_fill_data;
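/* The intent is a spill stream shaped roughly like this, with the two
   iterators alternating so that each stop-bit-delimited group holds two
   memory operations (a sketch; the registers and the offset of 16 are
   illustrative):

     st8.spill [r2] = r4, 16
     st8.spill [r3] = r5, 16
     ;;
     st8.spill [r2] = r6, 16
     st8.spill [r3] = r7, 16
     ;;  */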
2600 static void
2601 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2603 int i;
2605 spill_fill_data.init_after = get_last_insn ();
2606 spill_fill_data.init_reg[0] = init_reg;
2607 spill_fill_data.init_reg[1] = init_reg;
2608 spill_fill_data.prev_addr[0] = NULL;
2609 spill_fill_data.prev_addr[1] = NULL;
2610 spill_fill_data.prev_insn[0] = NULL;
2611 spill_fill_data.prev_insn[1] = NULL;
2612 spill_fill_data.prev_off[0] = cfa_off;
2613 spill_fill_data.prev_off[1] = cfa_off;
2614 spill_fill_data.next_iter = 0;
2615 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2617 spill_fill_data.n_iter = 1 + (n_spills > 2);
2618 for (i = 0; i < spill_fill_data.n_iter; ++i)
2620 int regno = next_scratch_gr_reg ();
2621 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2622 current_frame_info.gr_used_mask |= 1 << regno;
2626 static void
2627 finish_spill_pointers (void)
2629 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2632 static rtx
2633 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2635 int iter = spill_fill_data.next_iter;
2636 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2637 rtx disp_rtx = GEN_INT (disp);
2638 rtx mem;
2640 if (spill_fill_data.prev_addr[iter])
2642 if (satisfies_constraint_N (disp_rtx))
2644 *spill_fill_data.prev_addr[iter]
2645 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2646 gen_rtx_PLUS (DImode,
2647 spill_fill_data.iter_reg[iter],
2648 disp_rtx));
2649 REG_NOTES (spill_fill_data.prev_insn[iter])
2650 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2651 REG_NOTES (spill_fill_data.prev_insn[iter]));
2653 else
2655 /* ??? Could use register post_modify for loads. */
2656 if (!satisfies_constraint_I (disp_rtx))
2658 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2659 emit_move_insn (tmp, disp_rtx);
2660 disp_rtx = tmp;
2662 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2663 spill_fill_data.iter_reg[iter], disp_rtx));
2666 /* Micro-optimization: if we've created a frame pointer, it's at
2667 CFA 0, which may allow the real iterator to be initialized lower,
2668 slightly increasing parallelism. Also, if there are few saves
2669 it may eliminate the iterator entirely. */
2670 else if (disp == 0
2671 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2672 && frame_pointer_needed)
2674 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2675 set_mem_alias_set (mem, get_varargs_alias_set ());
2676 return mem;
2678 else
2680 rtx seq, insn;
2682 if (disp == 0)
2683 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2684 spill_fill_data.init_reg[iter]);
2685 else
2687 start_sequence ();
2689 if (!satisfies_constraint_I (disp_rtx))
2691 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2692 emit_move_insn (tmp, disp_rtx);
2693 disp_rtx = tmp;
2696 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2697 spill_fill_data.init_reg[iter],
2698 disp_rtx));
2700 seq = get_insns ();
2701 end_sequence ();
2704 /* Careful for being the first insn in a sequence. */
2705 if (spill_fill_data.init_after)
2706 insn = emit_insn_after (seq, spill_fill_data.init_after);
2707 else
2709 rtx first = get_insns ();
2710 if (first)
2711 insn = emit_insn_before (seq, first);
2712 else
2713 insn = emit_insn (seq);
2715 spill_fill_data.init_after = insn;
2717 /* If DISP is 0, we may or may not have a further adjustment
2718 afterward. If we do, then the load/store insn may be modified
2719 to be a post-modify. If we don't, then this copy may be
2720 eliminated by copyprop_hardreg_forward, which makes this
2721 insn garbage, which runs afoul of the sanity check in
2722 propagate_one_insn. So mark this insn as legal to delete. */
2723 if (disp == 0)
2724 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2725 REG_NOTES (insn));
2728 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2730 /* ??? Not all of the spills are for varargs, but some of them are.
2731 The rest of the spills belong in an alias set of their own. But
2732 it doesn't actually hurt to include them here. */
2733 set_mem_alias_set (mem, get_varargs_alias_set ());
2735 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2736 spill_fill_data.prev_off[iter] = cfa_off;
2738 if (++iter >= spill_fill_data.n_iter)
2739 iter = 0;
2740 spill_fill_data.next_iter = iter;
2742 return mem;
2745 static void
2746 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2747 rtx frame_reg)
2749 int iter = spill_fill_data.next_iter;
2750 rtx mem, insn;
2752 mem = spill_restore_mem (reg, cfa_off);
2753 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2754 spill_fill_data.prev_insn[iter] = insn;
2756 if (frame_reg)
2758 rtx base;
2759 HOST_WIDE_INT off;
2761 RTX_FRAME_RELATED_P (insn) = 1;
2763 /* Don't even pretend that the unwind code can intuit its way
2764 through a pair of interleaved post_modify iterators. Just
2765 provide the correct answer. */
2767 if (frame_pointer_needed)
2769 base = hard_frame_pointer_rtx;
2770 off = - cfa_off;
2772 else
2774 base = stack_pointer_rtx;
2775 off = current_frame_info.total_size - cfa_off;
2778 REG_NOTES (insn)
2779 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2780 gen_rtx_SET (VOIDmode,
2781 gen_rtx_MEM (GET_MODE (reg),
2782 plus_constant (base, off)),
2783 frame_reg),
2784 REG_NOTES (insn));
2788 static void
2789 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2791 int iter = spill_fill_data.next_iter;
2792 rtx insn;
2794 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2795 GEN_INT (cfa_off)));
2796 spill_fill_data.prev_insn[iter] = insn;
2799 /* Wrapper functions that discard the CONST_INT spill offset. These
2800 exist so that we can give gr_spill/gr_fill the offset they need and
2801 use a consistent function interface. */
2803 static rtx
2804 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2806 return gen_movdi (dest, src);
2809 static rtx
2810 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2812 return gen_fr_spill (dest, src);
2815 static rtx
2816 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2818 return gen_fr_restore (dest, src);
2821 /* Called after register allocation to add any instructions needed for the
2822 prologue. Using a prologue insn is favored compared to putting all of the
2823 instructions in output_function_prologue(), since it allows the scheduler
2824 to intermix instructions with the saves of the caller saved registers. In
2825 some cases, it might be necessary to emit a barrier instruction as the last
2826 insn to prevent such scheduling.
2828 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2829 so that the debug info generation code can handle them properly.
2831 The register save area is laid out like so:
2832 cfa+16
2833 [ varargs spill area ]
2834 [ fr register spill area ]
2835 [ br register spill area ]
2836 [ ar register spill area ]
2837 [ pr register spill area ]
2838 [ gr register spill area ] */
2840 /* ??? We get inefficient code when the frame size is larger than can fit in an
2841 adds instruction. */
2843 void
2844 ia64_expand_prologue (void)
2846 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2847 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2848 rtx reg, alt_reg;
2850 ia64_compute_frame_size (get_frame_size ());
2851 last_scratch_gr_reg = 15;
2853 /* If there is no epilogue, then we don't need some prologue insns.
2854 We need to avoid emitting the dead prologue insns, because flow
2855 will complain about them. */
2856 if (optimize)
2858 edge e;
2859 edge_iterator ei;
2861 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2862 if ((e->flags & EDGE_FAKE) == 0
2863 && (e->flags & EDGE_FALLTHRU) != 0)
2864 break;
2865 epilogue_p = (e != NULL);
2867 else
2868 epilogue_p = 1;
2870 /* Set the local, input, and output register names. We need to do this
2871 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2872 half. If we use in/loc/out register names, then we get assembler errors
2873 in crtn.S because there is no alloc insn or regstk directive in there. */
2874 if (! TARGET_REG_NAMES)
2876 int inputs = current_frame_info.n_input_regs;
2877 int locals = current_frame_info.n_local_regs;
2878 int outputs = current_frame_info.n_output_regs;
2880 for (i = 0; i < inputs; i++)
2881 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2882 for (i = 0; i < locals; i++)
2883 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2884 for (i = 0; i < outputs; i++)
2885 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2888 /* Set the frame pointer register name. The regnum is logically loc79,
2889 but of course we'll not have allocated that many locals. Rather than
2890 worrying about renumbering the existing rtxs, we adjust the name. */
2891 /* ??? This code means that we can never use one local register when
2892 there is a frame pointer. loc79 gets wasted in this case, as it is
2893 renamed to a register that will never be used. See also the try_locals
2894 code in find_gr_spill. */
2895 if (current_frame_info.reg_fp)
2897 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2898 reg_names[HARD_FRAME_POINTER_REGNUM]
2899 = reg_names[current_frame_info.reg_fp];
2900 reg_names[current_frame_info.reg_fp] = tmp;
2903 /* We don't need an alloc instruction if we've used no outputs or locals. */
2904 if (current_frame_info.n_local_regs == 0
2905 && current_frame_info.n_output_regs == 0
2906 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2907 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2909 /* If there is no alloc, but there are input registers used, then we
2910 need a .regstk directive. */
2911 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2912 ar_pfs_save_reg = NULL_RTX;
2914 else
2916 current_frame_info.need_regstk = 0;
2918 if (current_frame_info.reg_save_ar_pfs)
2919 regno = current_frame_info.reg_save_ar_pfs;
2920 else
2921 regno = next_scratch_gr_reg ();
2922 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2924 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2925 GEN_INT (current_frame_info.n_input_regs),
2926 GEN_INT (current_frame_info.n_local_regs),
2927 GEN_INT (current_frame_info.n_output_regs),
2928 GEN_INT (current_frame_info.n_rotate_regs)));
2929 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2932 /* Set up frame pointer, stack pointer, and spill iterators. */
2934 n_varargs = cfun->machine->n_varargs;
2935 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2936 stack_pointer_rtx, 0);
2938 if (frame_pointer_needed)
2940 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2941 RTX_FRAME_RELATED_P (insn) = 1;
2944 if (current_frame_info.total_size != 0)
2946 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2947 rtx offset;
2949 if (satisfies_constraint_I (frame_size_rtx))
2950 offset = frame_size_rtx;
2951 else
2953 regno = next_scratch_gr_reg ();
2954 offset = gen_rtx_REG (DImode, regno);
2955 emit_move_insn (offset, frame_size_rtx);
2958 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2959 stack_pointer_rtx, offset));
2961 if (! frame_pointer_needed)
2963 RTX_FRAME_RELATED_P (insn) = 1;
2964 if (GET_CODE (offset) != CONST_INT)
2966 REG_NOTES (insn)
2967 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2968 gen_rtx_SET (VOIDmode,
2969 stack_pointer_rtx,
2970 gen_rtx_PLUS (DImode,
2971 stack_pointer_rtx,
2972 frame_size_rtx)),
2973 REG_NOTES (insn));
2977 /* ??? At this point we must generate a magic insn that appears to
2978 modify the stack pointer, the frame pointer, and all spill
2979 iterators. This would allow the most scheduling freedom. For
2980 now, just hard stop. */
2981 emit_insn (gen_blockage ());
2984 /* Must copy out ar.unat before doing any integer spills. */
2985 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2987 if (current_frame_info.reg_save_ar_unat)
2988 ar_unat_save_reg
2989 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2990 else
2992 alt_regno = next_scratch_gr_reg ();
2993 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2994 current_frame_info.gr_used_mask |= 1 << alt_regno;
2997 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2998 insn = emit_move_insn (ar_unat_save_reg, reg);
2999 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
3001 /* Even if we're not going to generate an epilogue, we still
3002 need to save the register so that EH works. */
3003 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
3004 emit_insn (gen_prologue_use (ar_unat_save_reg));
3006 else
3007 ar_unat_save_reg = NULL_RTX;
3009 /* Spill all varargs registers. Do this before spilling any GR registers,
3010 since we want the UNAT bits for the GR registers to override the UNAT
3011 bits from varargs, which we don't care about. */
3013 cfa_off = -16;
3014 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3016 reg = gen_rtx_REG (DImode, regno);
3017 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3020 /* Locate the bottom of the register save area. */
3021 cfa_off = (current_frame_info.spill_cfa_off
3022 + current_frame_info.spill_size
3023 + current_frame_info.extra_spill_size);
3025 /* Save the predicate register block either in a register or in memory. */
3026 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3028 reg = gen_rtx_REG (DImode, PR_REG (0));
3029 if (current_frame_info.reg_save_pr != 0)
3031 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3032 insn = emit_move_insn (alt_reg, reg);
3034 /* ??? Denote pr spill/fill by a DImode move that modifies all
3035 64 hard registers. */
3036 RTX_FRAME_RELATED_P (insn) = 1;
3037 REG_NOTES (insn)
3038 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3039 gen_rtx_SET (VOIDmode, alt_reg, reg),
3040 REG_NOTES (insn));
3042 /* Even if we're not going to generate an epilogue, we still
3043 need to save the register so that EH works. */
3044 if (! epilogue_p)
3045 emit_insn (gen_prologue_use (alt_reg));
3047 else
3049 alt_regno = next_scratch_gr_reg ();
3050 alt_reg = gen_rtx_REG (DImode, alt_regno);
3051 insn = emit_move_insn (alt_reg, reg);
3052 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3053 cfa_off -= 8;
3057 /* Handle AR regs in numerical order. All of them get special handling. */
3058 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3059 && current_frame_info.reg_save_ar_unat == 0)
3061 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3062 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3063 cfa_off -= 8;
3066 /* The alloc insn already copied ar.pfs into a general register. The
3067 only thing we have to do now is copy that register to a stack slot
3068 if we'd not allocated a local register for the job. */
3069 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3070 && current_frame_info.reg_save_ar_pfs == 0)
3072 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3073 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3074 cfa_off -= 8;
3077 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3079 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3080 if (current_frame_info.reg_save_ar_lc != 0)
3082 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3083 insn = emit_move_insn (alt_reg, reg);
3084 RTX_FRAME_RELATED_P (insn) = 1;
3086 /* Even if we're not going to generate an epilogue, we still
3087 need to save the register so that EH works. */
3088 if (! epilogue_p)
3089 emit_insn (gen_prologue_use (alt_reg));
3091 else
3093 alt_regno = next_scratch_gr_reg ();
3094 alt_reg = gen_rtx_REG (DImode, alt_regno);
3095 emit_move_insn (alt_reg, reg);
3096 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3097 cfa_off -= 8;
3101 /* Save the return pointer. */
3102 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3104 reg = gen_rtx_REG (DImode, BR_REG (0));
3105 if (current_frame_info.reg_save_b0 != 0)
3107 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3108 insn = emit_move_insn (alt_reg, reg);
3109 RTX_FRAME_RELATED_P (insn) = 1;
3111 /* Even if we're not going to generate an epilogue, we still
3112 need to save the register so that EH works. */
3113 if (! epilogue_p)
3114 emit_insn (gen_prologue_use (alt_reg));
3116 else
3118 alt_regno = next_scratch_gr_reg ();
3119 alt_reg = gen_rtx_REG (DImode, alt_regno);
3120 emit_move_insn (alt_reg, reg);
3121 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3122 cfa_off -= 8;
3126 if (current_frame_info.reg_save_gp)
3128 insn = emit_move_insn (gen_rtx_REG (DImode,
3129 current_frame_info.reg_save_gp),
3130 pic_offset_table_rtx);
3131 /* We don't know for sure yet if this is actually needed, since
3132 we've not split the PIC call patterns. If all of the calls
3133 are indirect, and not followed by any uses of the gp, then
3134 this save is dead. Allow it to go away. */
3135 REG_NOTES (insn)
3136 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
3139 /* We should now be at the base of the gr/br/fr spill area. */
3140 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3141 + current_frame_info.spill_size));
3143 /* Spill all general registers. */
3144 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3145 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3147 reg = gen_rtx_REG (DImode, regno);
3148 do_spill (gen_gr_spill, reg, cfa_off, reg);
3149 cfa_off -= 8;
3152 /* Spill the rest of the BR registers. */
3153 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3154 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3156 alt_regno = next_scratch_gr_reg ();
3157 alt_reg = gen_rtx_REG (DImode, alt_regno);
3158 reg = gen_rtx_REG (DImode, regno);
3159 emit_move_insn (alt_reg, reg);
3160 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3161 cfa_off -= 8;
3164 /* Align the frame and spill all FR registers. */
3165 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3166 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3168 gcc_assert (!(cfa_off & 15));
3169 reg = gen_rtx_REG (XFmode, regno);
3170 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3171 cfa_off -= 16;
3174 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3176 finish_spill_pointers ();
3179 /* Called after register allocation to add any instructions needed for the
3180 epilogue. Using an epilogue insn is favored compared to putting all of the
3181 instructions in output_function_epilogue(), since it allows the scheduler
3182 to intermix instructions with the saves of the caller saved registers. In
3183 some cases, it might be necessary to emit a barrier instruction as the last
3184 insn to prevent such scheduling. */
3186 void
3187 ia64_expand_epilogue (int sibcall_p)
3189 rtx insn, reg, alt_reg, ar_unat_save_reg;
3190 int regno, alt_regno, cfa_off;
3192 ia64_compute_frame_size (get_frame_size ());
3194 /* If there is a frame pointer, then we use it instead of the stack
3195 pointer, so that the stack pointer does not need to be valid when
3196 the epilogue starts. See EXIT_IGNORE_STACK. */
3197 if (frame_pointer_needed)
3198 setup_spill_pointers (current_frame_info.n_spilled,
3199 hard_frame_pointer_rtx, 0);
3200 else
3201 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3202 current_frame_info.total_size);
3204 if (current_frame_info.total_size != 0)
3206 /* ??? At this point we must generate a magic insn that appears to
3207 modify the spill iterators and the frame pointer. This would
3208 allow the most scheduling freedom. For now, just hard stop. */
3209 emit_insn (gen_blockage ());
3212 /* Locate the bottom of the register save area. */
3213 cfa_off = (current_frame_info.spill_cfa_off
3214 + current_frame_info.spill_size
3215 + current_frame_info.extra_spill_size);
3217 /* Restore the predicate registers. */
3218 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3220 if (current_frame_info.reg_save_pr != 0)
3221 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3222 else
3224 alt_regno = next_scratch_gr_reg ();
3225 alt_reg = gen_rtx_REG (DImode, alt_regno);
3226 do_restore (gen_movdi_x, alt_reg, cfa_off);
3227 cfa_off -= 8;
3229 reg = gen_rtx_REG (DImode, PR_REG (0));
3230 emit_move_insn (reg, alt_reg);
3233 /* Restore the application registers. */
3235 /* Load the saved unat from the stack, but do not restore it until
3236 after the GRs have been restored. */
3237 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3239 if (current_frame_info.reg_save_ar_unat != 0)
3240 ar_unat_save_reg
3241 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3242 else
3244 alt_regno = next_scratch_gr_reg ();
3245 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3246 current_frame_info.gr_used_mask |= 1 << alt_regno;
3247 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3248 cfa_off -= 8;
3251 else
3252 ar_unat_save_reg = NULL_RTX;
3254 if (current_frame_info.reg_save_ar_pfs != 0)
3256 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3257 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3258 emit_move_insn (reg, alt_reg);
3260 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3262 alt_regno = next_scratch_gr_reg ();
3263 alt_reg = gen_rtx_REG (DImode, alt_regno);
3264 do_restore (gen_movdi_x, alt_reg, cfa_off);
3265 cfa_off -= 8;
3266 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3267 emit_move_insn (reg, alt_reg);
3270 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3272 if (current_frame_info.reg_save_ar_lc != 0)
3273 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3274 else
3276 alt_regno = next_scratch_gr_reg ();
3277 alt_reg = gen_rtx_REG (DImode, alt_regno);
3278 do_restore (gen_movdi_x, alt_reg, cfa_off);
3279 cfa_off -= 8;
3281 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3282 emit_move_insn (reg, alt_reg);
3285 /* Restore the return pointer. */
3286 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3288 if (current_frame_info.reg_save_b0 != 0)
3289 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3290 else
3292 alt_regno = next_scratch_gr_reg ();
3293 alt_reg = gen_rtx_REG (DImode, alt_regno);
3294 do_restore (gen_movdi_x, alt_reg, cfa_off);
3295 cfa_off -= 8;
3297 reg = gen_rtx_REG (DImode, BR_REG (0));
3298 emit_move_insn (reg, alt_reg);
3301 /* We should now be at the base of the gr/br/fr spill area. */
3302 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3303 + current_frame_info.spill_size));
3305 /* The GP may be stored on the stack in the prologue, but it's
3306 never restored in the epilogue. Skip the stack slot. */
3307 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3308 cfa_off -= 8;
3310 /* Restore all general registers. */
3311 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3312 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3314 reg = gen_rtx_REG (DImode, regno);
3315 do_restore (gen_gr_restore, reg, cfa_off);
3316 cfa_off -= 8;
3319 /* Restore the branch registers. */
3320 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3321 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3323 alt_regno = next_scratch_gr_reg ();
3324 alt_reg = gen_rtx_REG (DImode, alt_regno);
3325 do_restore (gen_movdi_x, alt_reg, cfa_off);
3326 cfa_off -= 8;
3327 reg = gen_rtx_REG (DImode, regno);
3328 emit_move_insn (reg, alt_reg);
3331 /* Restore floating point registers. */
3332 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3333 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3335 gcc_assert (!(cfa_off & 15));
3336 reg = gen_rtx_REG (XFmode, regno);
3337 do_restore (gen_fr_restore_x, reg, cfa_off);
3338 cfa_off -= 16;
3341 /* Restore ar.unat for real. */
3342 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3344 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3345 emit_move_insn (reg, ar_unat_save_reg);
3348 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3350 finish_spill_pointers ();
3352 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3354 /* ??? At this point we must generate a magic insn that appears to
3355 modify the spill iterators, the stack pointer, and the frame
3356 pointer. This would allow the most scheduling freedom. For now,
3357 just hard stop. */
3358 emit_insn (gen_blockage ());
3361 if (cfun->machine->ia64_eh_epilogue_sp)
3362 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3363 else if (frame_pointer_needed)
3365 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3366 RTX_FRAME_RELATED_P (insn) = 1;
3368 else if (current_frame_info.total_size)
3370 rtx offset, frame_size_rtx;
3372 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3373 if (satisfies_constraint_I (frame_size_rtx))
3374 offset = frame_size_rtx;
3375 else
3377 regno = next_scratch_gr_reg ();
3378 offset = gen_rtx_REG (DImode, regno);
3379 emit_move_insn (offset, frame_size_rtx);
3382 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3383 offset));
3385 RTX_FRAME_RELATED_P (insn) = 1;
3386 if (GET_CODE (offset) != CONST_INT)
3388 REG_NOTES (insn)
3389 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3390 gen_rtx_SET (VOIDmode,
3391 stack_pointer_rtx,
3392 gen_rtx_PLUS (DImode,
3393 stack_pointer_rtx,
3394 frame_size_rtx)),
3395 REG_NOTES (insn));
3399 if (cfun->machine->ia64_eh_epilogue_bsp)
3400 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3402 if (! sibcall_p)
3403 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3404 else
3406 int fp = GR_REG (2);
3407 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
3408 first available call-clobbered register.  If there was a frame pointer
3409 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3410 so we have to make sure we're using the string "r2" when emitting
3411 the register name for the assembler. */
3412 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3413 fp = HARD_FRAME_POINTER_REGNUM;
3415 /* We must emit an alloc to force the input registers to become output
3416 registers. Otherwise, if the callee tries to pass its parameters
3417 through to another call without an intervening alloc, then these
3418 values get lost. */
3419 /* ??? We don't need to preserve all input registers. We only need to
3420 preserve those input registers used as arguments to the sibling call.
3421 It is unclear how to compute that number here. */
3422 if (current_frame_info.n_input_regs != 0)
3424 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3425 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3426 const0_rtx, const0_rtx,
3427 n_inputs, const0_rtx));
3428 RTX_FRAME_RELATED_P (insn) = 1;
3433 /* Return 1 if br.ret can do all the work required to return from a
3434 function. */
3436 int
3437 ia64_direct_return (void)
3439 if (reload_completed && ! frame_pointer_needed)
3441 ia64_compute_frame_size (get_frame_size ());
3443 return (current_frame_info.total_size == 0
3444 && current_frame_info.n_spilled == 0
3445 && current_frame_info.reg_save_b0 == 0
3446 && current_frame_info.reg_save_pr == 0
3447 && current_frame_info.reg_save_ar_pfs == 0
3448 && current_frame_info.reg_save_ar_unat == 0
3449 && current_frame_info.reg_save_ar_lc == 0);
3451 return 0;
3454 /* Return the magic cookie that we use to hold the return address
3455 during early compilation. */
3457 rtx
3458 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3460 if (count != 0)
3461 return NULL;
3462 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3465 /* Split this value after reload, now that we know where the return
3466 address is saved. */
3468 void
3469 ia64_split_return_addr_rtx (rtx dest)
3471 rtx src;
3473 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3475 if (current_frame_info.reg_save_b0 != 0)
3476 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3477 else
3479 HOST_WIDE_INT off;
3480 unsigned int regno;
3481 rtx off_r;
3483 /* Compute offset from CFA for BR0. */
3484 /* ??? Must be kept in sync with ia64_expand_prologue. */
3485 off = (current_frame_info.spill_cfa_off
3486 + current_frame_info.spill_size);
3487 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3488 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3489 off -= 8;
3491 /* Convert CFA offset to a register based offset. */
3492 if (frame_pointer_needed)
3493 src = hard_frame_pointer_rtx;
3494 else
3496 src = stack_pointer_rtx;
3497 off += current_frame_info.total_size;
3500 /* Load address into scratch register. */
3501 off_r = GEN_INT (off);
3502 if (satisfies_constraint_I (off_r))
3503 emit_insn (gen_adddi3 (dest, src, off_r));
3504 else
3506 emit_move_insn (dest, off_r);
3507 emit_insn (gen_adddi3 (dest, src, dest));
3510 src = gen_rtx_MEM (Pmode, dest);
3513 else
3514 src = gen_rtx_REG (DImode, BR_REG (0));
3516 emit_move_insn (dest, src);
3519 int
3520 ia64_hard_regno_rename_ok (int from, int to)
3522 /* Don't clobber any of the registers we reserved for the prologue. */
3523 if (to == current_frame_info.reg_fp
3524 || to == current_frame_info.reg_save_b0
3525 || to == current_frame_info.reg_save_pr
3526 || to == current_frame_info.reg_save_ar_pfs
3527 || to == current_frame_info.reg_save_ar_unat
3528 || to == current_frame_info.reg_save_ar_lc)
3529 return 0;
3531 if (from == current_frame_info.reg_fp
3532 || from == current_frame_info.reg_save_b0
3533 || from == current_frame_info.reg_save_pr
3534 || from == current_frame_info.reg_save_ar_pfs
3535 || from == current_frame_info.reg_save_ar_unat
3536 || from == current_frame_info.reg_save_ar_lc)
3537 return 0;
3539 /* Don't use output registers outside the register frame. */
3540 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3541 return 0;
3543 /* Retain even/oddness on predicate register pairs. */
3544 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3545 return (from & 1) == (to & 1);
3547 return 1;
3550 /* Target hook for assembling integer objects. Handle word-sized
3551 aligned objects and detect the cases when @fptr is needed. */
3553 static bool
3554 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3556 if (size == POINTER_SIZE / BITS_PER_UNIT
3557 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3558 && GET_CODE (x) == SYMBOL_REF
3559 && SYMBOL_REF_FUNCTION_P (x))
3561 static const char * const directive[2][2] = {
3562 /* 64-bit pointer */ /* 32-bit pointer */
3563 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3564 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3566 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3567 output_addr_const (asm_out_file, x);
3568 fputs (")\n", asm_out_file);
3569 return true;
3571 return default_assemble_integer (x, size, aligned_p);
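/* For example, a word-sized static initializer that takes the address of
   a function is emitted as (illustrative output; "foo" is an assumed
   symbol name):

     data8 @fptr(foo)

   so that the linker materializes a function descriptor instead of the
   raw code address.  */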
3574 /* Emit the function prologue. */
3576 static void
3577 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3579 int mask, grsave, grsave_prev;
3581 if (current_frame_info.need_regstk)
3582 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3583 current_frame_info.n_input_regs,
3584 current_frame_info.n_local_regs,
3585 current_frame_info.n_output_regs,
3586 current_frame_info.n_rotate_regs);
3588 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3589 return;
3591 /* Emit the .prologue directive. */
3593 mask = 0;
3594 grsave = grsave_prev = 0;
3595 if (current_frame_info.reg_save_b0 != 0)
3597 mask |= 8;
3598 grsave = grsave_prev = current_frame_info.reg_save_b0;
3600 if (current_frame_info.reg_save_ar_pfs != 0
3601 && (grsave_prev == 0
3602 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3604 mask |= 4;
3605 if (grsave_prev == 0)
3606 grsave = current_frame_info.reg_save_ar_pfs;
3607 grsave_prev = current_frame_info.reg_save_ar_pfs;
3609 if (current_frame_info.reg_fp != 0
3610 && (grsave_prev == 0
3611 || current_frame_info.reg_fp == grsave_prev + 1))
3613 mask |= 2;
3614 if (grsave_prev == 0)
3615 grsave = HARD_FRAME_POINTER_REGNUM;
3616 grsave_prev = current_frame_info.reg_fp;
3618 if (current_frame_info.reg_save_pr != 0
3619 && (grsave_prev == 0
3620 || current_frame_info.reg_save_pr == grsave_prev + 1))
3622 mask |= 1;
3623 if (grsave_prev == 0)
3624 grsave = current_frame_info.reg_save_pr;
3627 if (mask && TARGET_GNU_AS)
3628 fprintf (file, "\t.prologue %d, %d\n", mask,
3629 ia64_dbx_register_number (grsave));
3630 else
3631 fputs ("\t.prologue\n", file);
3633 /* Emit a .spill directive, if necessary, to relocate the base of
3634 the register spill area. */
3635 if (current_frame_info.spill_cfa_off != -16)
3636 fprintf (file, "\t.spill %ld\n",
3637 (long) (current_frame_info.spill_cfa_off
3638 + current_frame_info.spill_size));
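/* An example of the encoding above (the register assignments are
   illustrative): if b0, ar.pfs, the frame pointer and the predicate
   block were all given consecutive GR save registers starting with
   reg_save_b0, then MASK becomes 8|4|2|1 == 15 and GRSAVE is
   reg_save_b0, yielding a directive of the form

     .prologue 15, <grsave>

   where <grsave> is ia64_dbx_register_number (reg_save_b0).  */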
3641 /* Emit the .body directive at the scheduled end of the prologue. */
3643 static void
3644 ia64_output_function_end_prologue (FILE *file)
3646 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3647 return;
3649 fputs ("\t.body\n", file);
3652 /* Emit the function epilogue. */
3654 static void
3655 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3656 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3658 int i;
3660 if (current_frame_info.reg_fp)
3662 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3663 reg_names[HARD_FRAME_POINTER_REGNUM]
3664 = reg_names[current_frame_info.reg_fp];
3665 reg_names[current_frame_info.reg_fp] = tmp;
3667 if (! TARGET_REG_NAMES)
3669 for (i = 0; i < current_frame_info.n_input_regs; i++)
3670 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3671 for (i = 0; i < current_frame_info.n_local_regs; i++)
3672 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3673 for (i = 0; i < current_frame_info.n_output_regs; i++)
3674 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3677 current_frame_info.initialized = 0;
3680 int
3681 ia64_dbx_register_number (int regno)
3683 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3684 from its home at loc79 to something inside the register frame. We
3685 must perform the same renumbering here for the debug info. */
3686 if (current_frame_info.reg_fp)
3688 if (regno == HARD_FRAME_POINTER_REGNUM)
3689 regno = current_frame_info.reg_fp;
3690 else if (regno == current_frame_info.reg_fp)
3691 regno = HARD_FRAME_POINTER_REGNUM;
3694 if (IN_REGNO_P (regno))
3695 return 32 + regno - IN_REG (0);
3696 else if (LOC_REGNO_P (regno))
3697 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3698 else if (OUT_REGNO_P (regno))
3699 return (32 + current_frame_info.n_input_regs
3700 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3701 else
3702 return regno;
3705 void
3706 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3708 rtx addr_reg, eight = GEN_INT (8);
3710 /* The Intel assembler requires that the global __ia64_trampoline symbol
3711 be declared explicitly */
3712 if (!TARGET_GNU_AS)
3714 static bool declared_ia64_trampoline = false;
3716 if (!declared_ia64_trampoline)
3718 declared_ia64_trampoline = true;
3719 (*targetm.asm_out.globalize_label) (asm_out_file,
3720 "__ia64_trampoline");
3724 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3725 addr = convert_memory_address (Pmode, addr);
3726 fnaddr = convert_memory_address (Pmode, fnaddr);
3727 static_chain = convert_memory_address (Pmode, static_chain);
3729 /* Load up our iterator. */
3730 addr_reg = gen_reg_rtx (Pmode);
3731 emit_move_insn (addr_reg, addr);
3733 /* The first two words are the fake descriptor:
3734 __ia64_trampoline, ADDR+16. */
3735 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3736 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3737 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3739 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3740 copy_to_reg (plus_constant (addr, 16)));
3741 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3743 /* The third word is the target descriptor. */
3744 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3745 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3747 /* The fourth word is the static chain. */
3748 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
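/* For reference, a sketch of the 32-byte trampoline written above
   (offsets relative to ADDR):

	[ADDR+ 0]  __ia64_trampoline   } fake function descriptor
	[ADDR+ 8]  ADDR + 16           }   (entry point, "gp")
	[ADDR+16]  FNADDR              } descriptor of the real target
	[ADDR+24]  STATIC_CHAIN        }

   __ia64_trampoline is expected to pick up the real entry point and the
   static chain from ADDR+16 and ADDR+24 at run time.  */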
3751 /* Do any needed setup for a variadic function. CUM has not been updated
3752 for the last named argument which has type TYPE and mode MODE.
3754 We generate the actual spill instructions during prologue generation. */
3756 static void
3757 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3758 tree type, int * pretend_size,
3759 int second_time ATTRIBUTE_UNUSED)
3761 CUMULATIVE_ARGS next_cum = *cum;
3763 /* Skip the current argument. */
3764 ia64_function_arg_advance (&next_cum, mode, type, 1);
3766 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3768 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3769 *pretend_size = n * UNITS_PER_WORD;
3770 cfun->machine->n_varargs = n;
3774 /* Check whether TYPE is a homogeneous floating point aggregate. If
3775 it is, return the mode of the floating point type that appears
3776    in all leaves.  If it is not, return VOIDmode.
3778    An aggregate is a homogeneous floating point aggregate if all
3779    fields/elements in it have the same floating point type (e.g.,
3780 SFmode). 128-bit quad-precision floats are excluded.
3782 Variable sized aggregates should never arrive here, since we should
3783 have already decided to pass them by reference. Top-level zero-sized
3784 aggregates are excluded because our parallels crash the middle-end. */
3786 static enum machine_mode
3787 hfa_element_mode (tree type, bool nested)
3789 enum machine_mode element_mode = VOIDmode;
3790 enum machine_mode mode;
3791 enum tree_code code = TREE_CODE (type);
3792 int know_element_mode = 0;
3793 tree t;
3795 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3796 return VOIDmode;
3798 switch (code)
3800 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3801 case BOOLEAN_TYPE: case POINTER_TYPE:
3802 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3803 case LANG_TYPE: case FUNCTION_TYPE:
3804 return VOIDmode;
3806 /* Fortran complex types are supposed to be HFAs, so we need to handle
3807 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3808 types though. */
3809 case COMPLEX_TYPE:
3810 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3811 && TYPE_MODE (type) != TCmode)
3812 return GET_MODE_INNER (TYPE_MODE (type));
3813 else
3814 return VOIDmode;
3816 case REAL_TYPE:
3817 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3818 mode if this is contained within an aggregate. */
3819 if (nested && TYPE_MODE (type) != TFmode)
3820 return TYPE_MODE (type);
3821 else
3822 return VOIDmode;
3824 case ARRAY_TYPE:
3825 return hfa_element_mode (TREE_TYPE (type), 1);
3827 case RECORD_TYPE:
3828 case UNION_TYPE:
3829 case QUAL_UNION_TYPE:
3830 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3832 if (TREE_CODE (t) != FIELD_DECL)
3833 continue;
3835 mode = hfa_element_mode (TREE_TYPE (t), 1);
3836 if (know_element_mode)
3838 if (mode != element_mode)
3839 return VOIDmode;
3841 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3842 return VOIDmode;
3843 else
3845 know_element_mode = 1;
3846 element_mode = mode;
3849 return element_mode;
3851 default:
3852 /* If we reach here, we probably have some front-end specific type
3853 that the backend doesn't know about. This can happen via the
3854 aggregate_value_p call in init_function_start. All we can do is
3855 ignore unknown tree types. */
3856 return VOIDmode;
3859 return VOIDmode;
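/* Illustrative examples of the classification above (a sketch, not
   exhaustive):

     struct { float x, y, z; };        -> SFmode   (HFA of 3 floats)
     struct { double d[4]; };          -> DFmode   (HFA via ARRAY_TYPE)
     _Complex double                   -> DFmode   (Fortran-style HFA)
     struct { float x; double y; };    -> VOIDmode (mixed element types)
     struct { float x; int tag; };     -> VOIDmode (non-FP field)  */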
3862 /* Return the number of words required to hold a quantity of TYPE and MODE
3863 when passed as an argument. */
3864 static int
3865 ia64_function_arg_words (tree type, enum machine_mode mode)
3867 int words;
3869 if (mode == BLKmode)
3870 words = int_size_in_bytes (type);
3871 else
3872 words = GET_MODE_SIZE (mode);
3874 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3877 /* Return the number of registers that should be skipped so the current
3878 argument (described by TYPE and WORDS) will be properly aligned.
3880 Integer and float arguments larger than 8 bytes start at the next
3881 even boundary. Aggregates larger than 8 bytes start at the next
3882 even boundary if the aggregate has 16 byte alignment. Note that
3883 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3884 but are still to be aligned in registers.
3886 ??? The ABI does not specify how to handle aggregates with
3887 alignment from 9 to 15 bytes, or greater than 16. We handle them
3888 all as if they had 16 byte alignment. Such aggregates can occur
3889 only if gcc extensions are used. */
3890 static int
3891 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3893 if ((cum->words & 1) == 0)
3894 return 0;
3896 if (type
3897 && TREE_CODE (type) != INTEGER_TYPE
3898 && TREE_CODE (type) != REAL_TYPE)
3899 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3900 else
3901 return words > 1;
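/* Worked example for the two helpers above, assuming 8-byte argument
   slots (UNITS_PER_WORD == 8) and a hypothetical prototype

     void f (int a, __int128 b);

   'a' occupies slot 0, so cum->words == 1 (odd) when 'b' is examined.
   'b' is 16 bytes, i.e. words == 2 > 1, so ia64_function_arg_offset
   returns 1 and 'b' is bumped to the even slot pair 2-3, leaving slot 1
   as padding.  A 16-byte aggregate with only 8-byte alignment would not
   be bumped.  */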
3904 /* Return rtx for register where argument is passed, or zero if it is passed
3905 on the stack. */
3906 /* ??? 128-bit quad-precision floats are always passed in general
3907 registers. */
3909 rtx
3910 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3911 int named, int incoming)
3913 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3914 int words = ia64_function_arg_words (type, mode);
3915 int offset = ia64_function_arg_offset (cum, type, words);
3916 enum machine_mode hfa_mode = VOIDmode;
3918 /* If all argument slots are used, then it must go on the stack. */
3919 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3920 return 0;
3922 /* Check for and handle homogeneous FP aggregates. */
3923 if (type)
3924 hfa_mode = hfa_element_mode (type, 0);
3926 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3927 and unprototyped hfas are passed specially. */
3928 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3930 rtx loc[16];
3931 int i = 0;
3932 int fp_regs = cum->fp_regs;
3933 int int_regs = cum->words + offset;
3934 int hfa_size = GET_MODE_SIZE (hfa_mode);
3935 int byte_size;
3936 int args_byte_size;
3938 /* If prototyped, pass it in FR regs then GR regs.
3939 If not prototyped, pass it in both FR and GR regs.
3941 If this is an SFmode aggregate, then it is possible to run out of
3942 FR regs while GR regs are still left. In that case, we pass the
3943 remaining part in the GR regs. */
3945 /* Fill the FP regs. We do this always. We stop if we reach the end
3946 of the argument, the last FP register, or the last argument slot. */
3948 byte_size = ((mode == BLKmode)
3949 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3950 args_byte_size = int_regs * UNITS_PER_WORD;
3951 offset = 0;
3952 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3953 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3955 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3956 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3957 + fp_regs)),
3958 GEN_INT (offset));
3959 offset += hfa_size;
3960 args_byte_size += hfa_size;
3961 fp_regs++;
3964 /* If no prototype, then the whole thing must go in GR regs. */
3965 if (! cum->prototype)
3966 offset = 0;
3967 /* If this is an SFmode aggregate, then we might have some left over
3968 that needs to go in GR regs. */
3969 else if (byte_size != offset)
3970 int_regs += offset / UNITS_PER_WORD;
3972 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3974 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3976 enum machine_mode gr_mode = DImode;
3977 unsigned int gr_size;
3979 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3980 then this goes in a GR reg left adjusted/little endian, right
3981 adjusted/big endian. */
3982 /* ??? Currently this is handled wrong, because 4-byte hunks are
3983 always right adjusted/little endian. */
3984 if (offset & 0x4)
3985 gr_mode = SImode;
3986 /* If we have an even 4 byte hunk because the aggregate is a
3987 multiple of 4 bytes in size, then this goes in a GR reg right
3988 adjusted/little endian. */
3989 else if (byte_size - offset == 4)
3990 gr_mode = SImode;
3992 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3993 gen_rtx_REG (gr_mode, (basereg
3994 + int_regs)),
3995 GEN_INT (offset));
3997 gr_size = GET_MODE_SIZE (gr_mode);
3998 offset += gr_size;
3999 if (gr_size == UNITS_PER_WORD
4000 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4001 int_regs++;
4002 else if (gr_size > UNITS_PER_WORD)
4003 int_regs += gr_size / UNITS_PER_WORD;
4005 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4008 /* Integral and aggregates go in general registers. If we have run out of
4009 FR registers, then FP values must also go in general registers. This can
4010 happen when we have a SFmode HFA. */
4011 else if (mode == TFmode || mode == TCmode
4012 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4014 int byte_size = ((mode == BLKmode)
4015 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4016 if (BYTES_BIG_ENDIAN
4017 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4018 && byte_size < UNITS_PER_WORD
4019 && byte_size > 0)
4021 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4022 gen_rtx_REG (DImode,
4023 (basereg + cum->words
4024 + offset)),
4025 const0_rtx);
4026 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4028 else
4029 return gen_rtx_REG (mode, basereg + cum->words + offset);
4033 /* If there is a prototype, then FP values go in a FR register when
4034 named, and in a GR register when unnamed. */
4035 else if (cum->prototype)
4037 if (named)
4038 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4039 /* In big-endian mode, an anonymous SFmode value must be represented
4040 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4041 the value into the high half of the general register. */
4042 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4043 return gen_rtx_PARALLEL (mode,
4044 gen_rtvec (1,
4045 gen_rtx_EXPR_LIST (VOIDmode,
4046 gen_rtx_REG (DImode, basereg + cum->words + offset),
4047 const0_rtx)));
4048 else
4049 return gen_rtx_REG (mode, basereg + cum->words + offset);
4051 /* If there is no prototype, then FP values go in both FR and GR
4052 registers. */
4053 else
4055 /* See comment above. */
4056 enum machine_mode inner_mode =
4057 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4059 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4060 gen_rtx_REG (mode, (FR_ARG_FIRST
4061 + cum->fp_regs)),
4062 const0_rtx);
4063 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4064 gen_rtx_REG (inner_mode,
4065 (basereg + cum->words
4066 + offset)),
4067 const0_rtx);
4069 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4073 /* Return number of bytes, at the beginning of the argument, that must be
4074    put in registers.  0 if the argument is entirely in registers or entirely
4075 in memory. */
4077 static int
4078 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4079 tree type, bool named ATTRIBUTE_UNUSED)
4081 int words = ia64_function_arg_words (type, mode);
4082 int offset = ia64_function_arg_offset (cum, type, words);
4084 /* If all argument slots are used, then it must go on the stack. */
4085 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4086 return 0;
4088 /* It doesn't matter whether the argument goes in FR or GR regs. If
4089 it fits within the 8 argument slots, then it goes entirely in
4090 registers. If it extends past the last argument slot, then the rest
4091 goes on the stack. */
4093 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4094 return 0;
4096 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
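/* Example of the partial case, assuming MAX_ARGUMENT_SLOTS == 8 and
   8-byte slots: a 48-byte, 8-byte-aligned aggregate whose first slot is
   slot 5 (cum->words == 5, offset == 0) needs words == 6 slots, but only
   slots 5-7 are left, so 3 * UNITS_PER_WORD == 24 bytes go in registers
   and the remaining 24 bytes go on the stack.  */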
4099 /* Update CUM to point after this argument. This is patterned after
4100 ia64_function_arg. */
4102 void
4103 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4104 tree type, int named)
4106 int words = ia64_function_arg_words (type, mode);
4107 int offset = ia64_function_arg_offset (cum, type, words);
4108 enum machine_mode hfa_mode = VOIDmode;
4110 /* If all arg slots are already full, then there is nothing to do. */
4111 if (cum->words >= MAX_ARGUMENT_SLOTS)
4112 return;
4114 cum->words += words + offset;
4116 /* Check for and handle homogeneous FP aggregates. */
4117 if (type)
4118 hfa_mode = hfa_element_mode (type, 0);
4120 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4121 and unprototyped hfas are passed specially. */
4122 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4124 int fp_regs = cum->fp_regs;
4125 /* This is the original value of cum->words + offset. */
4126 int int_regs = cum->words - words;
4127 int hfa_size = GET_MODE_SIZE (hfa_mode);
4128 int byte_size;
4129 int args_byte_size;
4131 /* If prototyped, pass it in FR regs then GR regs.
4132 If not prototyped, pass it in both FR and GR regs.
4134 If this is an SFmode aggregate, then it is possible to run out of
4135 FR regs while GR regs are still left. In that case, we pass the
4136 remaining part in the GR regs. */
4138 /* Fill the FP regs. We do this always. We stop if we reach the end
4139 of the argument, the last FP register, or the last argument slot. */
4141 byte_size = ((mode == BLKmode)
4142 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4143 args_byte_size = int_regs * UNITS_PER_WORD;
4144 offset = 0;
4145 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4146 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4148 offset += hfa_size;
4149 args_byte_size += hfa_size;
4150 fp_regs++;
4153 cum->fp_regs = fp_regs;
4156 /* Integral and aggregates go in general registers. So do TFmode FP values.
4157 If we have run out of FR registers, then other FP values must also go in
4158 general registers. This can happen when we have a SFmode HFA. */
4159 else if (mode == TFmode || mode == TCmode
4160 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4161 cum->int_regs = cum->words;
4163 /* If there is a prototype, then FP values go in a FR register when
4164 named, and in a GR register when unnamed. */
4165 else if (cum->prototype)
4167 if (! named)
4168 cum->int_regs = cum->words;
4169 else
4170 /* ??? Complex types should not reach here. */
4171 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4173 /* If there is no prototype, then FP values go in both FR and GR
4174 registers. */
4175 else
4177 /* ??? Complex types should not reach here. */
4178 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4179 cum->int_regs = cum->words;
4183 /* Arguments with alignment larger than 8 bytes start at the next even
4184 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
4185 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4187 int
4188 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4191 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4192 return PARM_BOUNDARY * 2;
4194 if (type)
4196 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4197 return PARM_BOUNDARY * 2;
4198 else
4199 return PARM_BOUNDARY;
4202 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4203 return PARM_BOUNDARY * 2;
4204 else
4205 return PARM_BOUNDARY;
4208 /* True if it is OK to do sibling call optimization for the specified
4209 call expression EXP. DECL will be the called function, or NULL if
4210 this is an indirect call. */
4211 static bool
4212 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4214 /* We can't perform a sibcall if the current function has the syscall_linkage
4215 attribute. */
4216 if (lookup_attribute ("syscall_linkage",
4217 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4218 return false;
4220 /* We must always return with our current GP. This means we can
4221 only sibcall to functions defined in the current module. */
4222 return decl && (*targetm.binds_local_p) (decl);
4226 /* Implement va_arg. */
4228 static tree
4229 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4231 /* Variable sized types are passed by reference. */
4232 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4234 tree ptrtype = build_pointer_type (type);
4235 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4236 return build_va_arg_indirect_ref (addr);
4239 /* Aggregate arguments with alignment larger than 8 bytes start at
4240 the next even boundary. Integer and floating point arguments
4241 do so if they are larger than 8 bytes, whether or not they are
4242 also aligned larger than 8 bytes. */
4243 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4244 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4246 tree t = build2 (PLUS_EXPR, TREE_TYPE (valist), valist,
4247 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
4248 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4249 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
4250 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist, t);
4251 gimplify_and_add (t, pre_p);
4254 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
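/* The alignment fixup above is, in effect (a sketch, with 8-byte words):

     valist = (valist + 2 * UNITS_PER_WORD - 1) & -(2 * UNITS_PER_WORD);

   i.e. round valist up to a 16-byte boundary before the standard va_arg
   expansion fetches the value, mirroring the even-slot rule used for
   register arguments in ia64_function_arg_offset.  */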
4257 /* Return 1 if the function return value is returned in memory.  Return 0 if it is
4258 in a register. */
4260 static bool
4261 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
4263 enum machine_mode mode;
4264 enum machine_mode hfa_mode;
4265 HOST_WIDE_INT byte_size;
4267 mode = TYPE_MODE (valtype);
4268 byte_size = GET_MODE_SIZE (mode);
4269 if (mode == BLKmode)
4271 byte_size = int_size_in_bytes (valtype);
4272 if (byte_size < 0)
4273 return true;
4276 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4278 hfa_mode = hfa_element_mode (valtype, 0);
4279 if (hfa_mode != VOIDmode)
4281 int hfa_size = GET_MODE_SIZE (hfa_mode);
4283 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4284 return true;
4285 else
4286 return false;
4288 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4289 return true;
4290 else
4291 return false;
4294 /* Return rtx for register that holds the function return value. */
4296 rtx
4297 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4299 enum machine_mode mode;
4300 enum machine_mode hfa_mode;
4302 mode = TYPE_MODE (valtype);
4303 hfa_mode = hfa_element_mode (valtype, 0);
4305 if (hfa_mode != VOIDmode)
4307 rtx loc[8];
4308 int i;
4309 int hfa_size;
4310 int byte_size;
4311 int offset;
4313 hfa_size = GET_MODE_SIZE (hfa_mode);
4314 byte_size = ((mode == BLKmode)
4315 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4316 offset = 0;
4317 for (i = 0; offset < byte_size; i++)
4319 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4320 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4321 GEN_INT (offset));
4322 offset += hfa_size;
4324 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4326 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4327 return gen_rtx_REG (mode, FR_ARG_FIRST);
4328 else
4330 bool need_parallel = false;
4332 /* In big-endian mode, we need to manage the layout of aggregates
4333 in the registers so that we get the bits properly aligned in
4334 the highpart of the registers. */
4335 if (BYTES_BIG_ENDIAN
4336 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4337 need_parallel = true;
4339 /* Something like struct S { long double x; char a[0]; } is not an
4340 HFA structure, and therefore doesn't go in fp registers. But
4341 the middle-end will give it XFmode anyway, and XFmode values
4342 don't normally fit in integer registers. So we need to smuggle
4343 the value inside a parallel. */
4344 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4345 need_parallel = true;
4347 if (need_parallel)
4349 rtx loc[8];
4350 int offset;
4351 int bytesize;
4352 int i;
4354 offset = 0;
4355 bytesize = int_size_in_bytes (valtype);
4356 /* An empty PARALLEL is invalid here, but the return value
4357 doesn't matter for empty structs. */
4358 if (bytesize == 0)
4359 return gen_rtx_REG (mode, GR_RET_FIRST);
4360 for (i = 0; offset < bytesize; i++)
4362 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4363 gen_rtx_REG (DImode,
4364 GR_RET_FIRST + i),
4365 GEN_INT (offset));
4366 offset += UNITS_PER_WORD;
4368 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4371 return gen_rtx_REG (mode, GR_RET_FIRST);
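/* Illustrative return-value cases for the two routines above (a sketch,
   assuming the usual ia64 conventions):

     struct { float x, y, z, w; }  -> PARALLEL of f8..f11 (HFA return)
     struct { double d[9]; }       -> in memory (more than 8 HFA slots)
     double                        -> f8 (FR_ARG_FIRST)
     long                          -> r8 (GR_RET_FIRST)  */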
4375 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4376 We need to emit DTP-relative relocations. */
4378 static void
4379 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4381 gcc_assert (size == 4 || size == 8);
4382 if (size == 4)
4383 fputs ("\tdata4.ua\t@dtprel(", file);
4384 else
4385 fputs ("\tdata8.ua\t@dtprel(", file);
4386 output_addr_const (file, x);
4387 fputs (")", file);
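/* For a hypothetical thread-local symbol 'foo', the routine above emits

	data8.ua	@dtprel(foo)

   for size 8, or data4.ua for size 4; the linker resolves @dtprel to
   the DTP-relative offset of foo.  */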
4390 /* Print a memory address as an operand to reference that memory location. */
4392 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4393 also call this from ia64_print_operand for memory addresses. */
4395 void
4396 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4397 rtx address ATTRIBUTE_UNUSED)
4401 /* Print an operand to an assembler instruction.
4402 C Swap and print a comparison operator.
4403 D Print an FP comparison operator.
4404 E Print 32 - constant, for SImode shifts as extract.
4405 e Print 64 - constant, for DImode rotates.
4406 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4407 a floating point register emitted normally.
4408 I Invert a predicate register by adding 1.
4409 J Select the proper predicate register for a condition.
4410 j Select the inverse predicate register for a condition.
4411 O Append .acq for volatile load.
4412 P Postincrement of a MEM.
4413 Q Append .rel for volatile store.
4414 S Shift amount for shladd instruction.
4415 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4416 for Intel assembler.
4417 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4418 for Intel assembler.
4419 X A pair of floating point registers.
4420 r Print register name, or constant 0 as r0. HP compatibility for
4421 Linux kernel.
4422 v Print vector constant value as an 8-byte integer value. */
4424 void
4425 ia64_print_operand (FILE * file, rtx x, int code)
4427 const char *str;
4429 switch (code)
4431 case 0:
4432 /* Handled below. */
4433 break;
4435 case 'C':
4437 enum rtx_code c = swap_condition (GET_CODE (x));
4438 fputs (GET_RTX_NAME (c), file);
4439 return;
4442 case 'D':
4443 switch (GET_CODE (x))
4445 case NE:
4446 str = "neq";
4447 break;
4448 case UNORDERED:
4449 str = "unord";
4450 break;
4451 case ORDERED:
4452 str = "ord";
4453 break;
4454 case UNLT:
4455 str = "nge";
4456 break;
4457 case UNLE:
4458 str = "ngt";
4459 break;
4460 case UNGT:
4461 str = "nle";
4462 break;
4463 case UNGE:
4464 str = "nlt";
4465 break;
4466 default:
4467 str = GET_RTX_NAME (GET_CODE (x));
4468 break;
4470 fputs (str, file);
4471 return;
4473 case 'E':
4474 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4475 return;
4477 case 'e':
4478 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4479 return;
4481 case 'F':
4482 if (x == CONST0_RTX (GET_MODE (x)))
4483 str = reg_names [FR_REG (0)];
4484 else if (x == CONST1_RTX (GET_MODE (x)))
4485 str = reg_names [FR_REG (1)];
4486 else
4488 gcc_assert (GET_CODE (x) == REG);
4489 str = reg_names [REGNO (x)];
4491 fputs (str, file);
4492 return;
4494 case 'I':
4495 fputs (reg_names [REGNO (x) + 1], file);
4496 return;
4498 case 'J':
4499 case 'j':
4501 unsigned int regno = REGNO (XEXP (x, 0));
4502 if (GET_CODE (x) == EQ)
4503 regno += 1;
4504 if (code == 'j')
4505 regno ^= 1;
4506 fputs (reg_names [regno], file);
4508 return;
4510 case 'O':
4511 if (MEM_VOLATILE_P (x))
4512 fputs(".acq", file);
4513 return;
4515 case 'P':
4517 HOST_WIDE_INT value;
4519 switch (GET_CODE (XEXP (x, 0)))
4521 default:
4522 return;
4524 case POST_MODIFY:
4525 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4526 if (GET_CODE (x) == CONST_INT)
4527 value = INTVAL (x);
4528 else
4530 gcc_assert (GET_CODE (x) == REG);
4531 fprintf (file, ", %s", reg_names[REGNO (x)]);
4532 return;
4534 break;
4536 case POST_INC:
4537 value = GET_MODE_SIZE (GET_MODE (x));
4538 break;
4540 case POST_DEC:
4541 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4542 break;
4545 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4546 return;
4549 case 'Q':
4550 if (MEM_VOLATILE_P (x))
4551 fputs(".rel", file);
4552 return;
4554 case 'S':
4555 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4556 return;
4558 case 'T':
4559 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4561 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4562 return;
4564 break;
4566 case 'U':
4567 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4569 const char *prefix = "0x";
4570 if (INTVAL (x) & 0x80000000)
4572 fprintf (file, "0xffffffff");
4573 prefix = "";
4575 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4576 return;
4578 break;
4580 case 'X':
4582 unsigned int regno = REGNO (x);
4583 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4585 return;
4587 case 'r':
4588 /* If this operand is the constant zero, write it as register zero.
4589 Any register, zero, or CONST_INT value is OK here. */
4590 if (GET_CODE (x) == REG)
4591 fputs (reg_names[REGNO (x)], file);
4592 else if (x == CONST0_RTX (GET_MODE (x)))
4593 fputs ("r0", file);
4594 else if (GET_CODE (x) == CONST_INT)
4595 output_addr_const (file, x);
4596 else
4597 output_operand_lossage ("invalid %%r value");
4598 return;
4600 case 'v':
4601 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4602 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4603 break;
4605 case '+':
4607 const char *which;
4609 /* For conditional branches, returns or calls, substitute
4610 sptk, dptk, dpnt, or spnt for %s. */
4611 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4612 if (x)
4614 int pred_val = INTVAL (XEXP (x, 0));
4616 /* Guess top and bottom 10% statically predicted. */
4617 if (pred_val < REG_BR_PROB_BASE / 50
4618 && br_prob_note_reliable_p (x))
4619 which = ".spnt";
4620 else if (pred_val < REG_BR_PROB_BASE / 2)
4621 which = ".dpnt";
4622 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4623 || !br_prob_note_reliable_p (x))
4624 which = ".dptk";
4625 else
4626 which = ".sptk";
4628 else if (GET_CODE (current_output_insn) == CALL_INSN)
4629 which = ".sptk";
4630 else
4631 which = ".dptk";
4633 fputs (which, file);
4634 return;
4637 case ',':
4638 x = current_insn_predicate;
4639 if (x)
4641 unsigned int regno = REGNO (XEXP (x, 0));
4642 if (GET_CODE (x) == EQ)
4643 regno += 1;
4644 fprintf (file, "(%s) ", reg_names [regno]);
4646 return;
4648 default:
4649 output_operand_lossage ("ia64_print_operand: unknown code");
4650 return;
4653 switch (GET_CODE (x))
4655 /* This happens for the spill/restore instructions. */
4656 case POST_INC:
4657 case POST_DEC:
4658 case POST_MODIFY:
4659 x = XEXP (x, 0);
4660 /* ... fall through ... */
4662 case REG:
4663 fputs (reg_names [REGNO (x)], file);
4664 break;
4666 case MEM:
4668 rtx addr = XEXP (x, 0);
4669 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4670 addr = XEXP (addr, 0);
4671 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4672 break;
4675 default:
4676 output_addr_const (file, x);
4677 break;
4680 return;
4683 /* Compute a (partial) cost for rtx X. Return true if the complete
4684 cost has been computed, and false if subexpressions should be
4685 scanned. In either case, *TOTAL contains the cost result. */
4686 /* ??? This is incomplete. */
4688 static bool
4689 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4691 switch (code)
4693 case CONST_INT:
4694 switch (outer_code)
4696 case SET:
4697 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
4698 return true;
4699 case PLUS:
4700 if (satisfies_constraint_I (x))
4701 *total = 0;
4702 else if (satisfies_constraint_J (x))
4703 *total = 1;
4704 else
4705 *total = COSTS_N_INSNS (1);
4706 return true;
4707 default:
4708 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
4709 *total = 0;
4710 else
4711 *total = COSTS_N_INSNS (1);
4712 return true;
4715 case CONST_DOUBLE:
4716 *total = COSTS_N_INSNS (1);
4717 return true;
4719 case CONST:
4720 case SYMBOL_REF:
4721 case LABEL_REF:
4722 *total = COSTS_N_INSNS (3);
4723 return true;
4725 case MULT:
4726 /* For multiplies wider than HImode, we have to go to the FPU,
4727 which normally involves copies. Plus there's the latency
4728 of the multiply itself, and the latency of the instructions to
4729 transfer integer regs to FP regs. */
4730 /* ??? Check for FP mode. */
4731 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4732 *total = COSTS_N_INSNS (10);
4733 else
4734 *total = COSTS_N_INSNS (2);
4735 return true;
4737 case PLUS:
4738 case MINUS:
4739 case ASHIFT:
4740 case ASHIFTRT:
4741 case LSHIFTRT:
4742 *total = COSTS_N_INSNS (1);
4743 return true;
4745 case DIV:
4746 case UDIV:
4747 case MOD:
4748 case UMOD:
4749 /* We make divide expensive, so that divide-by-constant will be
4750 optimized to a multiply. */
4751 *total = COSTS_N_INSNS (60);
4752 return true;
4754 default:
4755 return false;
4759 /* Calculate the cost of moving data from a register in class FROM to
4760 one in class TO, using MODE. */
4762 int
4763 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4764 enum reg_class to)
4766 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4767 if (to == ADDL_REGS)
4768 to = GR_REGS;
4769 if (from == ADDL_REGS)
4770 from = GR_REGS;
4772 /* All costs are symmetric, so reduce cases by putting the
4773 lower number class as the destination. */
4774 if (from < to)
4776 enum reg_class tmp = to;
4777 to = from, from = tmp;
4780 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4781 so that we get secondary memory reloads. Between FR_REGS,
4782 we have to make this at least as expensive as MEMORY_MOVE_COST
4783 to avoid spectacularly poor register class preferencing. */
4784 if (mode == XFmode || mode == RFmode)
4786 if (to != GR_REGS || from != GR_REGS)
4787 return MEMORY_MOVE_COST (mode, to, 0);
4788 else
4789 return 3;
4792 switch (to)
4794 case PR_REGS:
4795 /* Moving between PR registers takes two insns. */
4796 if (from == PR_REGS)
4797 return 3;
4798 /* Moving between PR and anything but GR is impossible. */
4799 if (from != GR_REGS)
4800 return MEMORY_MOVE_COST (mode, to, 0);
4801 break;
4803 case BR_REGS:
4804 /* Moving between BR and anything but GR is impossible. */
4805 if (from != GR_REGS && from != GR_AND_BR_REGS)
4806 return MEMORY_MOVE_COST (mode, to, 0);
4807 break;
4809 case AR_I_REGS:
4810 case AR_M_REGS:
4811 /* Moving between AR and anything but GR is impossible. */
4812 if (from != GR_REGS)
4813 return MEMORY_MOVE_COST (mode, to, 0);
4814 break;
4816 case GR_REGS:
4817 case FR_REGS:
4818 case FP_REGS:
4819 case GR_AND_FR_REGS:
4820 case GR_AND_BR_REGS:
4821 case ALL_REGS:
4822 break;
4824 default:
4825 gcc_unreachable ();
4828 return 2;
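/* Rough picture of the costs returned above, for modes other than
   XFmode/RFmode (a sketch):

     GR <-> GR, GR <-> FR, GR <-> BR/AR/PR : 2
     PR <-> PR                             : 3
     BR/AR/PR <-> anything but GR          : MEMORY_MOVE_COST

   XFmode/RFmode copies other than GR <-> GR are priced at
   MEMORY_MOVE_COST so that reload routes them through memory.  */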
4831 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4832 to use when copying X into that class. */
4834 enum reg_class
4835 ia64_preferred_reload_class (rtx x, enum reg_class class)
4837 switch (class)
4839 case FR_REGS:
4840 case FP_REGS:
4841 /* Don't allow volatile mem reloads into floating point registers.
4842 This is defined to force reload to choose the r/m case instead
4843 of the f/f case when reloading (set (reg fX) (mem/v)). */
4844 if (MEM_P (x) && MEM_VOLATILE_P (x))
4845 return NO_REGS;
4847 /* Force all unrecognized constants into the constant pool. */
4848 if (CONSTANT_P (x))
4849 return NO_REGS;
4850 break;
4852 case AR_M_REGS:
4853 case AR_I_REGS:
4854 if (!OBJECT_P (x))
4855 return NO_REGS;
4856 break;
4858 default:
4859 break;
4862 return class;
4865 /* This function returns the register class required for a secondary
4866 register when copying between one of the registers in CLASS, and X,
4867 using MODE. A return value of NO_REGS means that no secondary register
4868 is required. */
4870 enum reg_class
4871 ia64_secondary_reload_class (enum reg_class class,
4872 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4874 int regno = -1;
4876 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4877 regno = true_regnum (x);
4879 switch (class)
4881 case BR_REGS:
4882 case AR_M_REGS:
4883 case AR_I_REGS:
4884 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4885 interaction. We end up with two pseudos with overlapping lifetimes
4886    both of which are equiv to the same constant, and both of which need
4887 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4888 changes depending on the path length, which means the qty_first_reg
4889 check in make_regs_eqv can give different answers at different times.
4890 At some point I'll probably need a reload_indi pattern to handle
4891 this.
4893 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4894 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4895 non-general registers for good measure. */
4896 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4897 return GR_REGS;
4899 /* This is needed if a pseudo used as a call_operand gets spilled to a
4900 stack slot. */
4901 if (GET_CODE (x) == MEM)
4902 return GR_REGS;
4903 break;
4905 case FR_REGS:
4906 case FP_REGS:
4907 /* Need to go through general registers to get to other class regs. */
4908 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4909 return GR_REGS;
4911 /* This can happen when a paradoxical subreg is an operand to the
4912 muldi3 pattern. */
4913 /* ??? This shouldn't be necessary after instruction scheduling is
4914 enabled, because paradoxical subregs are not accepted by
4915 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4916 stop the paradoxical subreg stupidity in the *_operand functions
4917 in recog.c. */
4918 if (GET_CODE (x) == MEM
4919 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4920 || GET_MODE (x) == QImode))
4921 return GR_REGS;
4923 /* This can happen because of the ior/and/etc patterns that accept FP
4924 registers as operands. If the third operand is a constant, then it
4925 needs to be reloaded into a FP register. */
4926 if (GET_CODE (x) == CONST_INT)
4927 return GR_REGS;
4929 /* This can happen because of register elimination in a muldi3 insn.
4930 E.g. `26107 * (unsigned long)&u'. */
4931 if (GET_CODE (x) == PLUS)
4932 return GR_REGS;
4933 break;
4935 case PR_REGS:
4936 /* ??? This happens if we cse/gcse a BImode value across a call,
4937 and the function has a nonlocal goto. This is because global
4938 does not allocate call crossing pseudos to hard registers when
4939 current_function_has_nonlocal_goto is true. This is relatively
4940 common for C++ programs that use exceptions. To reproduce,
4941 return NO_REGS and compile libstdc++. */
4942 if (GET_CODE (x) == MEM)
4943 return GR_REGS;
4945 /* This can happen when we take a BImode subreg of a DImode value,
4946 and that DImode value winds up in some non-GR register. */
4947 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4948 return GR_REGS;
4949 break;
4951 default:
4952 break;
4955 return NO_REGS;
4959 /* Parse the -mfixed-range= option string. */
4961 static void
4962 fix_range (const char *const_str)
4964 int i, first, last;
4965 char *str, *dash, *comma;
4967 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4968 REG2 are either register names or register numbers. The effect
4969 of this option is to mark the registers in the range from REG1 to
4970 REG2 as ``fixed'' so they won't be used by the compiler. This is
4971 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
4973 i = strlen (const_str);
4974 str = (char *) alloca (i + 1);
4975 memcpy (str, const_str, i + 1);
4977 while (1)
4979 dash = strchr (str, '-');
4980 if (!dash)
4982 warning (0, "value of -mfixed-range must have form REG1-REG2");
4983 return;
4985 *dash = '\0';
4987 comma = strchr (dash + 1, ',');
4988 if (comma)
4989 *comma = '\0';
4991 first = decode_reg_name (str);
4992 if (first < 0)
4994 warning (0, "unknown register name: %s", str);
4995 return;
4998 last = decode_reg_name (dash + 1);
4999 if (last < 0)
5001 warning (0, "unknown register name: %s", dash + 1);
5002 return;
5005 *dash = '-';
5007 if (first > last)
5009 warning (0, "%s-%s is an empty range", str, dash + 1);
5010 return;
5013 for (i = first; i <= last; ++i)
5014 fixed_regs[i] = call_used_regs[i] = 1;
5016 if (!comma)
5017 break;
5019 *comma = ',';
5020 str = comma + 1;
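/* Typical uses of the option parsed above (illustrative):

     -mfixed-range=f32-f127           keep code out of f32-f127
     -mfixed-range=f12-f15,f32-f127   several comma-separated ranges

   Each register in the named ranges is marked both fixed and call-used,
   so the allocator never assigns it.  */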
5024 /* Implement TARGET_HANDLE_OPTION. */
5026 static bool
5027 ia64_handle_option (size_t code, const char *arg, int value)
5029 switch (code)
5031 case OPT_mfixed_range_:
5032 fix_range (arg);
5033 return true;
5035 case OPT_mtls_size_:
5036 if (value != 14 && value != 22 && value != 64)
5037 error ("bad value %<%s%> for -mtls-size= switch", arg);
5038 return true;
5040 case OPT_mtune_:
5042 static struct pta
5044 const char *name; /* processor name or nickname. */
5045 enum processor_type processor;
5047 const processor_alias_table[] =
5049 {"itanium", PROCESSOR_ITANIUM},
5050 {"itanium1", PROCESSOR_ITANIUM},
5051 {"merced", PROCESSOR_ITANIUM},
5052 {"itanium2", PROCESSOR_ITANIUM2},
5053 {"mckinley", PROCESSOR_ITANIUM2},
5055 int const pta_size = ARRAY_SIZE (processor_alias_table);
5056 int i;
5058 for (i = 0; i < pta_size; i++)
5059 if (!strcmp (arg, processor_alias_table[i].name))
5061 ia64_tune = processor_alias_table[i].processor;
5062 break;
5064 if (i == pta_size)
5065 error ("bad value %<%s%> for -mtune= switch", arg);
5066 return true;
5069 default:
5070 return true;
5074 /* Implement OVERRIDE_OPTIONS. */
5076 void
5077 ia64_override_options (void)
5079 if (TARGET_AUTO_PIC)
5080 target_flags |= MASK_CONST_GP;
5082 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5084 warning (0, "not yet implemented: latency-optimized inline square root");
5085 TARGET_INLINE_SQRT = INL_MAX_THR;
5088 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5089 flag_schedule_insns_after_reload = 0;
5091 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5093 init_machine_status = ia64_init_machine_status;
5096 static struct machine_function *
5097 ia64_init_machine_status (void)
5099 return ggc_alloc_cleared (sizeof (struct machine_function));
5102 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5103 static enum attr_type ia64_safe_type (rtx);
5105 static enum attr_itanium_class
5106 ia64_safe_itanium_class (rtx insn)
5108 if (recog_memoized (insn) >= 0)
5109 return get_attr_itanium_class (insn);
5110 else
5111 return ITANIUM_CLASS_UNKNOWN;
5114 static enum attr_type
5115 ia64_safe_type (rtx insn)
5117 if (recog_memoized (insn) >= 0)
5118 return get_attr_type (insn);
5119 else
5120 return TYPE_UNKNOWN;
5123 /* The following collection of routines emit instruction group stop bits as
5124 necessary to avoid dependencies. */
5126 /* Need to track some additional registers as far as serialization is
5127 concerned so we can properly handle br.call and br.ret. We could
5128 make these registers visible to gcc, but since these registers are
5129 never explicitly used in gcc generated code, it seems wasteful to
5130 do so (plus it would make the call and return patterns needlessly
5131 complex). */
5132 #define REG_RP (BR_REG (0))
5133 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5134 /* This is used for volatile asms which may require a stop bit immediately
5135 before and after them. */
5136 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5137 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5138 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5140 /* For each register, we keep track of how it has been written in the
5141 current instruction group.
5143 If a register is written unconditionally (no qualifying predicate),
5144 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5146 If a register is written if its qualifying predicate P is true, we
5147 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5148 may be written again by the complement of P (P^1) and when this happens,
5149 WRITE_COUNT gets set to 2.
5151 The result of this is that whenever an insn attempts to write a register
5152 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5154 If a predicate register is written by a floating-point insn, we set
5155 WRITTEN_BY_FP to true.
5157 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5158 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
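/* A short worked example of the WRITE_COUNT rules above (hypothetical
   instruction group; p6/p7 form a complementary predicate pair):

	(p6) mov r14 = r15    ; r14: WRITE_COUNT 1, FIRST_PRED p6
	(p7) mov r14 = r16    ; complement of p6 -> WRITE_COUNT 2, no stop
	     mov r14 = r17    ; WRITE_COUNT already 2 -> stop bit needed

   An unconditional write sets WRITE_COUNT straight to 2, so any later
   write of the register in the same group forces an insn group
   barrier.  */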
5160 struct reg_write_state
5162 unsigned int write_count : 2;
5163 unsigned int first_pred : 16;
5164 unsigned int written_by_fp : 1;
5165 unsigned int written_by_and : 1;
5166 unsigned int written_by_or : 1;
5169 /* Cumulative info for the current instruction group. */
5170 struct reg_write_state rws_sum[NUM_REGS];
5171 /* Info for the current instruction. This gets copied to rws_sum after a
5172 stop bit is emitted. */
5173 struct reg_write_state rws_insn[NUM_REGS];
5175 /* Indicates whether this is the first instruction after a stop bit,
5176 in which case we don't need another stop bit. Without this,
5177 ia64_variable_issue will die when scheduling an alloc. */
5178 static int first_instruction;
5180 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5181 RTL for one instruction. */
5182 struct reg_flags
5184 unsigned int is_write : 1; /* Is register being written? */
5185 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5186 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5187 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5188 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5189 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5192 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
5193 static int rws_access_regno (int, struct reg_flags, int);
5194 static int rws_access_reg (rtx, struct reg_flags, int);
5195 static void update_set_flags (rtx, struct reg_flags *);
5196 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5197 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5198 static void init_insn_group_barriers (void);
5199 static int group_barrier_needed (rtx);
5200 static int safe_group_barrier_needed (rtx);
5202 /* Update *RWS for REGNO, which is being written by the current instruction,
5203 with predicate PRED, and associated register flags in FLAGS. */
5205 static void
5206 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
5208 if (pred)
5209 rws[regno].write_count++;
5210 else
5211 rws[regno].write_count = 2;
5212 rws[regno].written_by_fp |= flags.is_fp;
5213 /* ??? Not tracking and/or across differing predicates. */
5214 rws[regno].written_by_and = flags.is_and;
5215 rws[regno].written_by_or = flags.is_or;
5216 rws[regno].first_pred = pred;
5219 /* Handle an access to register REGNO of type FLAGS using predicate register
5220 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5221 a dependency with an earlier instruction in the same group. */
5223 static int
5224 rws_access_regno (int regno, struct reg_flags flags, int pred)
5226 int need_barrier = 0;
5228 gcc_assert (regno < NUM_REGS);
5230 if (! PR_REGNO_P (regno))
5231 flags.is_and = flags.is_or = 0;
5233 if (flags.is_write)
5235 int write_count;
5237       /* One insn writes the same reg multiple times?  */
5238 gcc_assert (!rws_insn[regno].write_count);
5240 /* Update info for current instruction. */
5241 rws_update (rws_insn, regno, flags, pred);
5242 write_count = rws_sum[regno].write_count;
5244 switch (write_count)
5246 case 0:
5247 /* The register has not been written yet. */
5248 rws_update (rws_sum, regno, flags, pred);
5249 break;
5251 case 1:
5252 /* The register has been written via a predicate. If this is
5253 not a complementary predicate, then we need a barrier. */
5254 /* ??? This assumes that P and P+1 are always complementary
5255 predicates for P even. */
5256 if (flags.is_and && rws_sum[regno].written_by_and)
5258 else if (flags.is_or && rws_sum[regno].written_by_or)
5260 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5261 need_barrier = 1;
5262 rws_update (rws_sum, regno, flags, pred);
5263 break;
5265 case 2:
5266 /* The register has been unconditionally written already. We
5267 need a barrier. */
5268 if (flags.is_and && rws_sum[regno].written_by_and)
5270 else if (flags.is_or && rws_sum[regno].written_by_or)
5272 else
5273 need_barrier = 1;
5274 rws_sum[regno].written_by_and = flags.is_and;
5275 rws_sum[regno].written_by_or = flags.is_or;
5276 break;
5278 default:
5279 gcc_unreachable ();
5282 else
5284 if (flags.is_branch)
5286       /* Branches have several RAW exceptions that allow us to avoid
5287 barriers. */
5289 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5290 /* RAW dependencies on branch regs are permissible as long
5291 as the writer is a non-branch instruction. Since we
5292 never generate code that uses a branch register written
5293 by a branch instruction, handling this case is
5294 easy. */
5295 return 0;
5297 if (REGNO_REG_CLASS (regno) == PR_REGS
5298 && ! rws_sum[regno].written_by_fp)
5299 /* The predicates of a branch are available within the
5300 same insn group as long as the predicate was written by
5301 something other than a floating-point instruction. */
5302 return 0;
5305 if (flags.is_and && rws_sum[regno].written_by_and)
5306 return 0;
5307 if (flags.is_or && rws_sum[regno].written_by_or)
5308 return 0;
5310 switch (rws_sum[regno].write_count)
5312 case 0:
5313 /* The register has not been written yet. */
5314 break;
5316 case 1:
5317 /* The register has been written via a predicate. If this is
5318 not a complementary predicate, then we need a barrier. */
5319 /* ??? This assumes that P and P+1 are always complementary
5320 predicates for P even. */
5321 if ((rws_sum[regno].first_pred ^ 1) != pred)
5322 need_barrier = 1;
5323 break;
5325 case 2:
5326 /* The register has been unconditionally written already. We
5327 need a barrier. */
5328 need_barrier = 1;
5329 break;
5331 default:
5332 gcc_unreachable ();
5336 return need_barrier;
5339 static int
5340 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5342 int regno = REGNO (reg);
5343 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5345 if (n == 1)
5346 return rws_access_regno (regno, flags, pred);
5347 else
5349 int need_barrier = 0;
5350 while (--n >= 0)
5351 need_barrier |= rws_access_regno (regno + n, flags, pred);
5352 return need_barrier;
5356 /* Examine X, which is a SET rtx, and update the register flags
5357    stored in *PFLAGS according to its source.  */
5359 static void
5360 update_set_flags (rtx x, struct reg_flags *pflags)
5362 rtx src = SET_SRC (x);
5364 switch (GET_CODE (src))
5366 case CALL:
5367 return;
5369 case IF_THEN_ELSE:
5370 /* There are four cases here:
5371 (1) The destination is (pc), in which case this is a branch,
5372 nothing here applies.
5373 (2) The destination is ar.lc, in which case this is a
5374 doloop_end_internal,
5375 (3) The destination is an fp register, in which case this is
5376 an fselect instruction.
5377 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5378 this is a check load.
5379 In all cases, nothing we do in this function applies. */
5380 return;
5382 default:
5383 if (COMPARISON_P (src)
5384 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5385 /* Set pflags->is_fp to 1 so that we know we're dealing
5386 with a floating point comparison when processing the
5387 destination of the SET. */
5388 pflags->is_fp = 1;
5390 /* Discover if this is a parallel comparison. We only handle
5391 and.orcm and or.andcm at present, since we must retain a
5392 strict inverse on the predicate pair. */
5393 else if (GET_CODE (src) == AND)
5394 pflags->is_and = 1;
5395 else if (GET_CODE (src) == IOR)
5396 pflags->is_or = 1;
5398 break;
5402 /* Subroutine of rtx_needs_barrier; this function determines whether the
5403 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5404 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5405 for this insn. */
5407 static int
5408 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5410 int need_barrier = 0;
5411 rtx dst;
5412 rtx src = SET_SRC (x);
5414 if (GET_CODE (src) == CALL)
5415 /* We don't need to worry about the result registers that
5416 get written by subroutine call. */
5417 return rtx_needs_barrier (src, flags, pred);
5418 else if (SET_DEST (x) == pc_rtx)
5420 /* X is a conditional branch. */
5421 /* ??? This seems redundant, as the caller sets this bit for
5422 all JUMP_INSNs. */
5423 if (!ia64_spec_check_src_p (src))
5424 flags.is_branch = 1;
5425 return rtx_needs_barrier (src, flags, pred);
5428 if (ia64_spec_check_src_p (src))
5429 /* Avoid checking one register twice (in condition
5430 and in 'then' section) for ldc pattern. */
5432 gcc_assert (REG_P (XEXP (src, 2)));
5433 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5435 /* We process MEM below. */
5436 src = XEXP (src, 1);
5439 need_barrier |= rtx_needs_barrier (src, flags, pred);
5441 dst = SET_DEST (x);
5442 if (GET_CODE (dst) == ZERO_EXTRACT)
5444 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5445 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5447 return need_barrier;
5450 /* Handle an access to rtx X of type FLAGS using predicate register
5451 PRED. Return 1 if this access creates a dependency with an earlier
5452 instruction in the same group. */
5454 static int
5455 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5457 int i, j;
5458 int is_complemented = 0;
5459 int need_barrier = 0;
5460 const char *format_ptr;
5461 struct reg_flags new_flags;
5462 rtx cond;
5464 if (! x)
5465 return 0;
5467 new_flags = flags;
5469 switch (GET_CODE (x))
5471 case SET:
5472 update_set_flags (x, &new_flags);
5473 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5474 if (GET_CODE (SET_SRC (x)) != CALL)
5476 new_flags.is_write = 1;
5477 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5479 break;
5481 case CALL:
5482 new_flags.is_write = 0;
5483 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5485 /* Avoid multiple register writes, in case this is a pattern with
5486 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5487 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5489 new_flags.is_write = 1;
5490 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5491 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5492 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5494 break;
5496 case COND_EXEC:
5497 /* X is a predicated instruction. */
5499 cond = COND_EXEC_TEST (x);
5500 gcc_assert (!pred);
5501 need_barrier = rtx_needs_barrier (cond, flags, 0);
5503 if (GET_CODE (cond) == EQ)
5504 is_complemented = 1;
5505 cond = XEXP (cond, 0);
5506 gcc_assert (GET_CODE (cond) == REG
5507 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5508 pred = REGNO (cond);
5509 if (is_complemented)
5510 ++pred;
5512 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5513 return need_barrier;
5515 case CLOBBER:
5516 case USE:
5517 /* Clobber & use are for earlier compiler-phases only. */
5518 break;
5520 case ASM_OPERANDS:
5521 case ASM_INPUT:
5522 /* We always emit stop bits for traditional asms. We emit stop bits
5523 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5524 if (GET_CODE (x) != ASM_OPERANDS
5525 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5527 /* Avoid writing the register multiple times if we have multiple
5528 asm outputs. This avoids a failure in rws_access_reg. */
5529 if (! rws_insn[REG_VOLATILE].write_count)
5531 new_flags.is_write = 1;
5532 rws_access_regno (REG_VOLATILE, new_flags, pred);
5534 return 1;
5537 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5538 	 We cannot just fall through here, since we would then be confused
5539 	 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
5540 	 usage, does not indicate a traditional asm.  */
5542 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5543 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5544 need_barrier = 1;
5545 break;
5547 case PARALLEL:
5548 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5550 rtx pat = XVECEXP (x, 0, i);
5551 switch (GET_CODE (pat))
5553 case SET:
5554 update_set_flags (pat, &new_flags);
5555 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5556 break;
5558 case USE:
5559 case CALL:
5560 case ASM_OPERANDS:
5561 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5562 break;
5564 case CLOBBER:
5565 case RETURN:
5566 break;
5568 default:
5569 gcc_unreachable ();
5572 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5574 rtx pat = XVECEXP (x, 0, i);
5575 if (GET_CODE (pat) == SET)
5577 if (GET_CODE (SET_SRC (pat)) != CALL)
5579 new_flags.is_write = 1;
5580 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5581 pred);
5584 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5585 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5587 break;
5589 case SUBREG:
5590 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5591 break;
5592 case REG:
5593 if (REGNO (x) == AR_UNAT_REGNUM)
5595 for (i = 0; i < 64; ++i)
5596 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5598 else
5599 need_barrier = rws_access_reg (x, flags, pred);
5600 break;
5602 case MEM:
5603 /* Find the regs used in memory address computation. */
5604 new_flags.is_write = 0;
5605 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5606 break;
5608 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5609 case SYMBOL_REF: case LABEL_REF: case CONST:
5610 break;
5612 /* Operators with side-effects. */
5613 case POST_INC: case POST_DEC:
5614 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5616 new_flags.is_write = 0;
5617 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5618 new_flags.is_write = 1;
5619 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5620 break;
5622 case POST_MODIFY:
5623 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5625 new_flags.is_write = 0;
5626 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5627 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5628 new_flags.is_write = 1;
5629 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5630 break;
5632 /* Handle common unary and binary ops for efficiency. */
5633 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5634 case MOD: case UDIV: case UMOD: case AND: case IOR:
5635 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5636 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5637 case NE: case EQ: case GE: case GT: case LE:
5638 case LT: case GEU: case GTU: case LEU: case LTU:
5639 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5640 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5641 break;
5643 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5644 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5645 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5646 case SQRT: case FFS: case POPCOUNT:
5647 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5648 break;
5650 case VEC_SELECT:
5651 /* VEC_SELECT's second argument is a PARALLEL with integers that
5652 describe the elements selected. On ia64, those integers are
5653 always constants. Avoid walking the PARALLEL so that we don't
5654 get confused with "normal" parallels and then die. */
5655 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5656 break;
5658 case UNSPEC:
5659 switch (XINT (x, 1))
5661 case UNSPEC_LTOFF_DTPMOD:
5662 case UNSPEC_LTOFF_DTPREL:
5663 case UNSPEC_DTPREL:
5664 case UNSPEC_LTOFF_TPREL:
5665 case UNSPEC_TPREL:
5666 case UNSPEC_PRED_REL_MUTEX:
5667 case UNSPEC_PIC_CALL:
5668 case UNSPEC_MF:
5669 case UNSPEC_FETCHADD_ACQ:
5670 case UNSPEC_BSP_VALUE:
5671 case UNSPEC_FLUSHRS:
5672 case UNSPEC_BUNDLE_SELECTOR:
5673 break;
5675 case UNSPEC_GR_SPILL:
5676 case UNSPEC_GR_RESTORE:
5678 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5679 HOST_WIDE_INT bit = (offset >> 3) & 63;
5681 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5682 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5683 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5684 new_flags, pred);
5685 break;
5688 case UNSPEC_FR_SPILL:
5689 case UNSPEC_FR_RESTORE:
5690 case UNSPEC_GETF_EXP:
5691 case UNSPEC_SETF_EXP:
5692 case UNSPEC_ADDP4:
5693 case UNSPEC_FR_SQRT_RECIP_APPROX:
5694 case UNSPEC_LDA:
5695 case UNSPEC_LDS:
5696 case UNSPEC_LDSA:
5697 case UNSPEC_CHKACLR:
5698 case UNSPEC_CHKS:
5699 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5700 break;
5702 case UNSPEC_FR_RECIP_APPROX:
5703 case UNSPEC_SHRP:
5704 case UNSPEC_COPYSIGN:
5705 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5706 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5707 break;
5709 case UNSPEC_CMPXCHG_ACQ:
5710 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5711 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5712 break;
5714 default:
5715 gcc_unreachable ();
5717 break;
5719 case UNSPEC_VOLATILE:
5720 switch (XINT (x, 1))
5722 case UNSPECV_ALLOC:
5723 /* Alloc must always be the first instruction of a group.
5724 We force this by always returning true. */
5725 /* ??? We might get better scheduling if we explicitly check for
5726 input/local/output register dependencies, and modify the
5727 scheduler so that alloc is always reordered to the start of
5728 the current group. We could then eliminate all of the
5729 first_instruction code. */
5730 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5732 new_flags.is_write = 1;
5733 rws_access_regno (REG_AR_CFM, new_flags, pred);
5734 return 1;
5736 case UNSPECV_SET_BSP:
5737 need_barrier = 1;
5738 break;
5740 case UNSPECV_BLOCKAGE:
5741 case UNSPECV_INSN_GROUP_BARRIER:
5742 case UNSPECV_BREAK:
5743 case UNSPECV_PSAC_ALL:
5744 case UNSPECV_PSAC_NORMAL:
5745 return 0;
5747 default:
5748 gcc_unreachable ();
5750 break;
5752 case RETURN:
5753 new_flags.is_write = 0;
5754 need_barrier = rws_access_regno (REG_RP, flags, pred);
5755 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5757 new_flags.is_write = 1;
5758 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5759 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5760 break;
5762 default:
5763 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5764 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5765 switch (format_ptr[i])
5767 case '0': /* unused field */
5768 case 'i': /* integer */
5769 case 'n': /* note */
5770 case 'w': /* wide integer */
5771 case 's': /* pointer to string */
5772 case 'S': /* optional pointer to string */
5773 break;
5775 case 'e':
5776 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5777 need_barrier = 1;
5778 break;
5780 case 'E':
5781 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5782 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5783 need_barrier = 1;
5784 break;
5786 default:
5787 gcc_unreachable ();
5789 break;
5791 return need_barrier;
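/* Illustrative sketch (not part of the original sources): the rws_*
   bookkeeping above is what turns a same-group dependency into a
   nonzero return value.  For two hypothetical patterns

	(set (reg:DI r32) (reg:DI r33))		;; records a write of r32
	(set (reg:DI r34) (reg:DI r32))		;; reads r32 in the same group

   the second traversal finds the pending write of r32 recorded in
   rws_sum and reports that a barrier (stop bit) is needed before it.  */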
5794 /* Clear out the state for group_barrier_needed at the start of a
5795 sequence of insns. */
5797 static void
5798 init_insn_group_barriers (void)
5800 memset (rws_sum, 0, sizeof (rws_sum));
5801 first_instruction = 1;
5804 /* Given the current state, determine whether a group barrier (a stop bit) is
5805 necessary before INSN. Return nonzero if so. This modifies the state to
5806 include the effects of INSN as a side-effect. */
5808 static int
5809 group_barrier_needed (rtx insn)
5811 rtx pat;
5812 int need_barrier = 0;
5813 struct reg_flags flags;
5815 memset (&flags, 0, sizeof (flags));
5816 switch (GET_CODE (insn))
5818 case NOTE:
5819 break;
5821 case BARRIER:
5822 /* A barrier doesn't imply an instruction group boundary. */
5823 break;
5825 case CODE_LABEL:
5826 memset (rws_insn, 0, sizeof (rws_insn));
5827 return 1;
5829 case CALL_INSN:
5830 flags.is_branch = 1;
5831 flags.is_sibcall = SIBLING_CALL_P (insn);
5832 memset (rws_insn, 0, sizeof (rws_insn));
5834 /* Don't bundle a call following another call. */
5835 if ((pat = prev_active_insn (insn))
5836 && GET_CODE (pat) == CALL_INSN)
5838 need_barrier = 1;
5839 break;
5842 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5843 break;
5845 case JUMP_INSN:
5846 if (!ia64_spec_check_p (insn))
5847 flags.is_branch = 1;
5849 /* Don't bundle a jump following a call. */
5850 if ((pat = prev_active_insn (insn))
5851 && GET_CODE (pat) == CALL_INSN)
5853 need_barrier = 1;
5854 break;
5856 /* FALLTHRU */
5858 case INSN:
5859 if (GET_CODE (PATTERN (insn)) == USE
5860 || GET_CODE (PATTERN (insn)) == CLOBBER)
5861 /* Don't care about USE and CLOBBER "insns"---those are used to
5862 indicate to the optimizer that it shouldn't get rid of
5863 certain operations. */
5864 break;
5866 pat = PATTERN (insn);
5868 /* Ug. Hack hacks hacked elsewhere. */
5869 switch (recog_memoized (insn))
5871 /* We play dependency tricks with the epilogue in order
5872 to get proper schedules. Undo this for dv analysis. */
5873 case CODE_FOR_epilogue_deallocate_stack:
5874 case CODE_FOR_prologue_allocate_stack:
5875 pat = XVECEXP (pat, 0, 0);
5876 break;
5878 /* The pattern we use for br.cloop confuses the code above.
5879 The second element of the vector is representative. */
5880 case CODE_FOR_doloop_end_internal:
5881 pat = XVECEXP (pat, 0, 1);
5882 break;
5884 /* Doesn't generate code. */
5885 case CODE_FOR_pred_rel_mutex:
5886 case CODE_FOR_prologue_use:
5887 return 0;
5889 default:
5890 break;
5893 memset (rws_insn, 0, sizeof (rws_insn));
5894 need_barrier = rtx_needs_barrier (pat, flags, 0);
5896 /* Check to see if the previous instruction was a volatile
5897 asm. */
5898 if (! need_barrier)
5899 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5900 break;
5902 default:
5903 gcc_unreachable ();
5906 if (first_instruction && INSN_P (insn)
5907 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5908 && GET_CODE (PATTERN (insn)) != USE
5909 && GET_CODE (PATTERN (insn)) != CLOBBER)
5911 need_barrier = 0;
5912 first_instruction = 0;
5915 return need_barrier;
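/* Illustrative sketch (not part of the original sources): when
   group_barrier_needed returns nonzero, the net effect in the emitted
   assembly is a stop bit (";;") between the dependent insns, e.g.

	add r14 = r15, r16 ;;	// write of r14 closes the group
	ld8 r17 = [r14]		// read of r14 starts a new group

   (register names here are made up for illustration).  */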
5918 /* Like group_barrier_needed, but do not clobber the current state. */
5920 static int
5921 safe_group_barrier_needed (rtx insn)
5923 struct reg_write_state rws_saved[NUM_REGS];
5924 int saved_first_instruction;
5925 int t;
5927 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5928 saved_first_instruction = first_instruction;
5930 t = group_barrier_needed (insn);
5932 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5933 first_instruction = saved_first_instruction;
5935 return t;
5938 /* Scan the current function and insert stop bits as necessary to
5939 eliminate dependencies. This function assumes that a final
5940 instruction scheduling pass has been run which has already
5941 inserted most of the necessary stop bits. This function only
5942 inserts new ones at basic block boundaries, since these are
5943 invisible to the scheduler. */
5945 static void
5946 emit_insn_group_barriers (FILE *dump)
5948 rtx insn;
5949 rtx last_label = 0;
5950 int insns_since_last_label = 0;
5952 init_insn_group_barriers ();
5954 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5956 if (GET_CODE (insn) == CODE_LABEL)
5958 if (insns_since_last_label)
5959 last_label = insn;
5960 insns_since_last_label = 0;
5962 else if (GET_CODE (insn) == NOTE
5963 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
5965 if (insns_since_last_label)
5966 last_label = insn;
5967 insns_since_last_label = 0;
5969 else if (GET_CODE (insn) == INSN
5970 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5971 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5973 init_insn_group_barriers ();
5974 last_label = 0;
5976 else if (INSN_P (insn))
5978 insns_since_last_label = 1;
5980 if (group_barrier_needed (insn))
5982 if (last_label)
5984 if (dump)
5985 fprintf (dump, "Emitting stop before label %d\n",
5986 INSN_UID (last_label));
5987 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5988 insn = last_label;
5990 init_insn_group_barriers ();
5991 last_label = 0;
5998 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5999 This function has to emit all necessary group barriers. */
6001 static void
6002 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6004 rtx insn;
6006 init_insn_group_barriers ();
6008 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6010 if (GET_CODE (insn) == BARRIER)
6012 rtx last = prev_active_insn (insn);
6014 if (! last)
6015 continue;
6016 if (GET_CODE (last) == JUMP_INSN
6017 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6018 last = prev_active_insn (last);
6019 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6020 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6022 init_insn_group_barriers ();
6024 else if (INSN_P (insn))
6026 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6027 init_insn_group_barriers ();
6028 else if (group_barrier_needed (insn))
6030 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6031 init_insn_group_barriers ();
6032 group_barrier_needed (insn);
6040 /* Instruction scheduling support. */
6042 #define NR_BUNDLES 10
6044 /* A list of names of all available bundles. */
6046 static const char *bundle_name [NR_BUNDLES] =
6048 ".mii",
6049 ".mmi",
6050 ".mfi",
6051 ".mmf",
6052 #if NR_BUNDLES == 10
6053 ".bbb",
6054 ".mbb",
6055 #endif
6056 ".mib",
6057 ".mmb",
6058 ".mfb",
6059 ".mlx"
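/* Illustrative sketch (not part of the original sources): each name
   above is emitted as a bundle selector pseudo-op in front of the three
   slots it describes, e.g. an .mfi bundle might be printed as

	{ .mfi
	  ld8 r14 = [r32]	// M slot
	  nop.f 0		// F slot
	  add r15 = 8, r33	// I slot
	}

   (register names are made up for illustration).  */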
6062 /* Nonzero if we should insert stop bits into the schedule. */
6064 int ia64_final_schedule = 0;
6066 /* Codes of the corresponding queried units: */
6068 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6069 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6071 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6072 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6074 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6076 /* The following variable value is an insn group barrier. */
6078 static rtx dfa_stop_insn;
6080 /* The following variable value is the last issued insn. */
6082 static rtx last_scheduled_insn;
6084 /* The following variable value is size of the DFA state. */
6086 static size_t dfa_state_size;
6088 /* The following variable value is pointer to a DFA state used as
6089 temporary variable. */
6091 static state_t temp_dfa_state = NULL;
6093 /* The following variable value is DFA state after issuing the last
6094 insn. */
6096 static state_t prev_cycle_state = NULL;
6098 /* The following array element values are TRUE if the corresponding
6099 insn requires stop bits to be added before it. */
6101 static char *stops_p = NULL;
6103 /* The following array element values are ZERO for non-speculative
6104 instructions and hold corresponding speculation check number for
6105 speculative instructions. */
6106 static int *spec_check_no = NULL;
6108 /* Size of spec_check_no array. */
6109 static int max_uid = 0;
6111 /* The following variable is used to set up the array mentioned above. */
6113 static int stop_before_p = 0;
6115 /* The following variable value is length of the arrays `clocks' and
6116 `add_cycles'. */
6118 static int clocks_length;
6120 /* The following array element values are cycles on which the
6121 corresponding insn will be issued. The array is used only for
6122 Itanium1. */
6124 static int *clocks;
6126 /* The following array element values are the numbers of cycles that should
6127 be added to improve insn scheduling for MM_insns on Itanium1. */
6129 static int *add_cycles;
6131 /* The following variable value is number of data speculations in progress. */
6132 static int pending_data_specs = 0;
6134 static rtx ia64_single_set (rtx);
6135 static void ia64_emit_insn_before (rtx, rtx);
6137 /* Map a bundle number to its pseudo-op. */
6139 const char *
6140 get_bundle_name (int b)
6142 return bundle_name[b];
6146 /* Return the maximum number of instructions a cpu can issue. */
6148 static int
6149 ia64_issue_rate (void)
6151 return 6;
6154 /* Helper function - like single_set, but look inside COND_EXEC. */
6156 static rtx
6157 ia64_single_set (rtx insn)
6159 rtx x = PATTERN (insn), ret;
6160 if (GET_CODE (x) == COND_EXEC)
6161 x = COND_EXEC_CODE (x);
6162 if (GET_CODE (x) == SET)
6163 return x;
6165 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6166 Although they are not classical single sets, the second set is there just
6167 to protect the first from being moved past FP-relative stack accesses. */
6168 switch (recog_memoized (insn))
6170 case CODE_FOR_prologue_allocate_stack:
6171 case CODE_FOR_epilogue_deallocate_stack:
6172 ret = XVECEXP (x, 0, 0);
6173 break;
6175 default:
6176 ret = single_set_2 (insn, x);
6177 break;
6180 return ret;
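/* Illustrative sketch (not part of the original sources): a predicated
   ia64 insn wraps its SET in a COND_EXEC, roughly

	(cond_exec (ne (reg:BI p6) (const_int 0))
		   (set (reg:DI r14) (reg:DI r15)))

   ia64_single_set strips the COND_EXEC wrapper and hands back the inner
   SET.  (The predicate and register names are made up for
   illustration.)  */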
6183 /* Adjust the cost of a scheduling dependency. Return the new cost of
6184 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6186 static int
6187 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6189 enum attr_itanium_class dep_class;
6190 enum attr_itanium_class insn_class;
6192 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6193 return cost;
6195 insn_class = ia64_safe_itanium_class (insn);
6196 dep_class = ia64_safe_itanium_class (dep_insn);
6197 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6198 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6199 return 0;
6201 return cost;
6204 /* Like emit_insn_before, but skip cycle_display notes.
6205 ??? When cycle display notes are implemented, update this. */
6207 static void
6208 ia64_emit_insn_before (rtx insn, rtx before)
6210 emit_insn_before (insn, before);
6213 /* The following function marks insns that produce addresses for load
6214 and store insns. Such insns will be placed into M slots because that
6215 decreases latency on Itanium1 (see function
6216 `ia64_produce_address_p' and the DFA descriptions). */
6218 static void
6219 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6221 rtx insn, next, next_tail;
6223 /* Before reload, which_alternative is not set, which means that
6224 ia64_safe_itanium_class will produce wrong results for (at least)
6225 move instructions. */
6226 if (!reload_completed)
6227 return;
6229 next_tail = NEXT_INSN (tail);
6230 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6231 if (INSN_P (insn))
6232 insn->call = 0;
6233 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6234 if (INSN_P (insn)
6235 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6237 dep_link_t link;
6239 FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn))
6241 enum attr_itanium_class c;
6243 if (DEP_LINK_KIND (link) != REG_DEP_TRUE)
6244 continue;
6246 next = DEP_LINK_CON (link);
6247 c = ia64_safe_itanium_class (next);
6248 if ((c == ITANIUM_CLASS_ST
6249 || c == ITANIUM_CLASS_STF)
6250 && ia64_st_address_bypass_p (insn, next))
6251 break;
6252 else if ((c == ITANIUM_CLASS_LD
6253 || c == ITANIUM_CLASS_FLD
6254 || c == ITANIUM_CLASS_FLDP)
6255 && ia64_ld_address_bypass_p (insn, next))
6256 break;
6258 insn->call = link != 0;
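/* Illustrative sketch (not part of the original sources): the hook above
   would flag an IALU insn whose result feeds only memory addresses, e.g.

	add r14 = r32, r33 ;;	// marked as an address producer
	ld8 r15 = [r14]		// consumer of the address

   so the add is preferably scheduled into an M slot, where the address
   bypass modeled in the Itanium1 DFA description applies.  (Registers
   are made up for illustration.)  */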
6262 /* We're beginning a new block. Initialize data structures as necessary. */
6264 static void
6265 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6266 int sched_verbose ATTRIBUTE_UNUSED,
6267 int max_ready ATTRIBUTE_UNUSED)
6269 #ifdef ENABLE_CHECKING
6270 rtx insn;
6272 if (reload_completed)
6273 for (insn = NEXT_INSN (current_sched_info->prev_head);
6274 insn != current_sched_info->next_tail;
6275 insn = NEXT_INSN (insn))
6276 gcc_assert (!SCHED_GROUP_P (insn));
6277 #endif
6278 last_scheduled_insn = NULL_RTX;
6279 init_insn_group_barriers ();
6282 /* We're beginning a scheduling pass. Check assertion. */
6284 static void
6285 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6286 int sched_verbose ATTRIBUTE_UNUSED,
6287 int max_ready ATTRIBUTE_UNUSED)
6289 gcc_assert (!pending_data_specs);
6292 /* Scheduling pass is now finished. Free/reset static variable. */
6293 static void
6294 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6295 int sched_verbose ATTRIBUTE_UNUSED)
6297 free (spec_check_no);
6298 spec_check_no = 0;
6299 max_uid = 0;
6302 /* We are about to begin issuing insns for this clock cycle.
6303 Override the default sort algorithm to better slot instructions. */
6305 static int
6306 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6307 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6308 int reorder_type)
6310 int n_asms;
6311 int n_ready = *pn_ready;
6312 rtx *e_ready = ready + n_ready;
6313 rtx *insnp;
6315 if (sched_verbose)
6316 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6318 if (reorder_type == 0)
6320 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6321 n_asms = 0;
6322 for (insnp = ready; insnp < e_ready; insnp++)
6323 if (insnp < e_ready)
6325 rtx insn = *insnp;
6326 enum attr_type t = ia64_safe_type (insn);
6327 if (t == TYPE_UNKNOWN)
6329 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6330 || asm_noperands (PATTERN (insn)) >= 0)
6332 rtx lowest = ready[n_asms];
6333 ready[n_asms] = insn;
6334 *insnp = lowest;
6335 n_asms++;
6337 else
6339 rtx highest = ready[n_ready - 1];
6340 ready[n_ready - 1] = insn;
6341 *insnp = highest;
6342 return 1;
6347 if (n_asms < n_ready)
6349 /* Some normal insns to process. Skip the asms. */
6350 ready += n_asms;
6351 n_ready -= n_asms;
6353 else if (n_ready > 0)
6354 return 1;
6357 if (ia64_final_schedule)
6359 int deleted = 0;
6360 int nr_need_stop = 0;
6362 for (insnp = ready; insnp < e_ready; insnp++)
6363 if (safe_group_barrier_needed (*insnp))
6364 nr_need_stop++;
6366 if (reorder_type == 1 && n_ready == nr_need_stop)
6367 return 0;
6368 if (reorder_type == 0)
6369 return 1;
6370 insnp = e_ready;
6371 /* Move down everything that needs a stop bit, preserving
6372 relative order. */
6373 while (insnp-- > ready + deleted)
6374 while (insnp >= ready + deleted)
6376 rtx insn = *insnp;
6377 if (! safe_group_barrier_needed (insn))
6378 break;
6379 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6380 *ready = insn;
6381 deleted++;
6383 n_ready -= deleted;
6384 ready += deleted;
6387 return 1;
6390 /* We are about to begin issuing insns for this clock cycle. Override
6391 the default sort algorithm to better slot instructions. */
6393 static int
6394 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6395 int clock_var)
6397 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6398 pn_ready, clock_var, 0);
6401 /* Like ia64_sched_reorder, but called after issuing each insn.
6402 Override the default sort algorithm to better slot instructions. */
6404 static int
6405 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6406 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6407 int *pn_ready, int clock_var)
6409 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6410 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6411 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6412 clock_var, 1);
6415 /* We are about to issue INSN. Return the number of insns left on the
6416 ready queue that can be issued this cycle. */
6418 static int
6419 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6420 int sched_verbose ATTRIBUTE_UNUSED,
6421 rtx insn ATTRIBUTE_UNUSED,
6422 int can_issue_more ATTRIBUTE_UNUSED)
6424 if (current_sched_info->flags & DO_SPECULATION)
6425 /* Modulo scheduling does not extend h_i_d when emitting
6426 new instructions. Deal with it. */
6428 if (DONE_SPEC (insn) & BEGIN_DATA)
6429 pending_data_specs++;
6430 if (CHECK_SPEC (insn) & BEGIN_DATA)
6431 pending_data_specs--;
6434 last_scheduled_insn = insn;
6435 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6436 if (reload_completed)
6438 int needed = group_barrier_needed (insn);
6440 gcc_assert (!needed);
6441 if (GET_CODE (insn) == CALL_INSN)
6442 init_insn_group_barriers ();
6443 stops_p [INSN_UID (insn)] = stop_before_p;
6444 stop_before_p = 0;
6446 return 1;
6449 /* We are choosing insn from the ready queue. Return nonzero if INSN
6450 can be chosen. */
6452 static int
6453 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6455 gcc_assert (insn && INSN_P (insn));
6456 return ((!reload_completed
6457 || !safe_group_barrier_needed (insn))
6458 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn));
6461 /* We are choosing insn from the ready queue. Return nonzero if INSN
6462 can be chosen. */
6464 static bool
6465 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (rtx insn)
6467 gcc_assert (insn && INSN_P (insn));
6468 /* The ALAT has 32 entries. Since we perform conservative data speculation,
6469 we keep the ALAT half-empty. */
6470 return (pending_data_specs < 16
6471 || !(TODO_SPEC (insn) & BEGIN_DATA));
6474 /* The following variable value is pseudo-insn used by the DFA insn
6475 scheduler to change the DFA state when the simulated clock is
6476 increased. */
6478 static rtx dfa_pre_cycle_insn;
6480 /* We are about to begin issuing INSN. Return nonzero if we cannot
6481 issue it on the given cycle CLOCK, and return zero if we should not sort
6482 the ready queue on the next clock start. */
6484 static int
6485 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6486 int clock, int *sort_p)
6488 int setup_clocks_p = FALSE;
6490 gcc_assert (insn && INSN_P (insn));
6491 if ((reload_completed && safe_group_barrier_needed (insn))
6492 || (last_scheduled_insn
6493 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6494 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6495 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6497 init_insn_group_barriers ();
6498 if (verbose && dump)
6499 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6500 last_clock == clock ? " + cycle advance" : "");
6501 stop_before_p = 1;
6502 if (last_clock == clock)
6504 state_transition (curr_state, dfa_stop_insn);
6505 if (TARGET_EARLY_STOP_BITS)
6506 *sort_p = (last_scheduled_insn == NULL_RTX
6507 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6508 else
6509 *sort_p = 0;
6510 return 1;
6512 else if (reload_completed)
6513 setup_clocks_p = TRUE;
6514 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6515 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6516 state_reset (curr_state);
6517 else
6519 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6520 state_transition (curr_state, dfa_stop_insn);
6521 state_transition (curr_state, dfa_pre_cycle_insn);
6522 state_transition (curr_state, NULL);
6525 else if (reload_completed)
6526 setup_clocks_p = TRUE;
6527 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6528 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6529 && asm_noperands (PATTERN (insn)) < 0)
6531 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6533 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6535 dep_link_t link;
6536 int d = -1;
6538 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
6539 if (DEP_LINK_KIND (link) == REG_DEP_TRUE)
6541 enum attr_itanium_class dep_class;
6542 rtx dep_insn = DEP_LINK_PRO (link);
6544 dep_class = ia64_safe_itanium_class (dep_insn);
6545 if ((dep_class == ITANIUM_CLASS_MMMUL
6546 || dep_class == ITANIUM_CLASS_MMSHF)
6547 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6548 && (d < 0
6549 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6550 d = last_clock - clocks [INSN_UID (dep_insn)];
6552 if (d >= 0)
6553 add_cycles [INSN_UID (insn)] = 3 - d;
6556 return 0;
6559 /* Implement targetm.sched.h_i_d_extended hook.
6560 Extend internal data structures. */
6561 static void
6562 ia64_h_i_d_extended (void)
6564 if (current_sched_info->flags & DO_SPECULATION)
6566 int new_max_uid = get_max_uid () + 1;
6568 spec_check_no = xrecalloc (spec_check_no, new_max_uid,
6569 max_uid, sizeof (*spec_check_no));
6570 max_uid = new_max_uid;
6573 if (stops_p != NULL)
6575 int new_clocks_length = get_max_uid () + 1;
6577 stops_p = xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6579 if (ia64_tune == PROCESSOR_ITANIUM)
6581 clocks = xrecalloc (clocks, new_clocks_length, clocks_length,
6582 sizeof (int));
6583 add_cycles = xrecalloc (add_cycles, new_clocks_length, clocks_length,
6584 sizeof (int));
6587 clocks_length = new_clocks_length;
6591 /* Constants that help mapping 'enum machine_mode' to int. */
6592 enum SPEC_MODES
6594 SPEC_MODE_INVALID = -1,
6595 SPEC_MODE_FIRST = 0,
6596 SPEC_MODE_FOR_EXTEND_FIRST = 1,
6597 SPEC_MODE_FOR_EXTEND_LAST = 3,
6598 SPEC_MODE_LAST = 8
6601 /* Return index of the MODE. */
6602 static int
6603 ia64_mode_to_int (enum machine_mode mode)
6605 switch (mode)
6607 case BImode: return 0; /* SPEC_MODE_FIRST */
6608 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
6609 case HImode: return 2;
6610 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
6611 case DImode: return 4;
6612 case SFmode: return 5;
6613 case DFmode: return 6;
6614 case XFmode: return 7;
6615 case TImode:
6616 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
6617 mentioned in itanium[12].md. Predicate fp_register_operand also
6618 needs to be defined. Bottom line: better disable for now. */
6619 return SPEC_MODE_INVALID;
6620 default: return SPEC_MODE_INVALID;
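/* Worked examples (not part of the original sources) of the mapping
   above: ia64_mode_to_int (SImode) == 3, which lies inside
   [SPEC_MODE_FOR_EXTEND_FIRST, SPEC_MODE_FOR_EXTEND_LAST], so an SImode
   load wrapped in a ZERO_EXTEND may still be speculated, while
   ia64_mode_to_int (DImode) == 4 is a valid speculation mode but falls
   outside the extend range, so a zero-extending DImode load would be
   rejected by ia64_speculate_insn.  */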
6624 /* Provide information about speculation capabilities. */
6625 static void
6626 ia64_set_sched_flags (spec_info_t spec_info)
6628 unsigned int *flags = &(current_sched_info->flags);
6630 if (*flags & SCHED_RGN
6631 || *flags & SCHED_EBB)
6633 int mask = 0;
6635 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
6636 || (mflag_sched_ar_data_spec && reload_completed))
6638 mask |= BEGIN_DATA;
6640 if ((mflag_sched_br_in_data_spec && !reload_completed)
6641 || (mflag_sched_ar_in_data_spec && reload_completed))
6642 mask |= BE_IN_DATA;
6645 if (mflag_sched_control_spec)
6647 mask |= BEGIN_CONTROL;
6649 if (mflag_sched_in_control_spec)
6650 mask |= BE_IN_CONTROL;
6653 gcc_assert (*flags & USE_GLAT);
6655 if (mask)
6657 *flags |= USE_DEPS_LIST | DETACH_LIFE_INFO | DO_SPECULATION;
6659 spec_info->mask = mask;
6660 spec_info->flags = 0;
6662 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
6663 spec_info->flags |= PREFER_NON_DATA_SPEC;
6665 if ((mask & CONTROL_SPEC)
6666 && mflag_sched_prefer_non_control_spec_insns)
6667 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
6669 if (mflag_sched_spec_verbose)
6671 if (sched_verbose >= 1)
6672 spec_info->dump = sched_dump;
6673 else
6674 spec_info->dump = stderr;
6676 else
6677 spec_info->dump = 0;
6679 if (mflag_sched_count_spec_in_critical_path)
6680 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
6685 /* Implement targetm.sched.speculate_insn hook.
6686 Check whether INSN can be TS speculative.
6687 If not, return -1.
6688 If so, generate the speculative pattern in NEW_PAT and return 1.
6689 If the current pattern of INSN already provides TS speculation, return 0. */
6690 static int
6691 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
6693 rtx pat, reg, mem, mem_reg;
6694 int mode_no, gen_p = 1;
6695 bool extend_p;
6697 gcc_assert (!(ts & ~BEGIN_SPEC) && ts);
6699 pat = PATTERN (insn);
6701 if (GET_CODE (pat) == COND_EXEC)
6702 pat = COND_EXEC_CODE (pat);
6704 /* This should be a SET ... */
6705 if (GET_CODE (pat) != SET)
6706 return -1;
6708 reg = SET_DEST (pat);
6709 /* ... to the general/fp register ... */
6710 if (!REG_P (reg) || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
6711 return -1;
6713 /* ... from the mem ... */
6714 mem = SET_SRC (pat);
6716 /* ... that can, possibly, be a zero_extend ... */
6717 if (GET_CODE (mem) == ZERO_EXTEND)
6719 mem = XEXP (mem, 0);
6720 extend_p = true;
6722 else
6723 extend_p = false;
6725 /* ... or a speculative load. */
6726 if (GET_CODE (mem) == UNSPEC)
6728 int code;
6730 code = XINT (mem, 1);
6731 if (code != UNSPEC_LDA && code != UNSPEC_LDS && code != UNSPEC_LDSA)
6732 return -1;
6734 if ((code == UNSPEC_LDA && !(ts & BEGIN_CONTROL))
6735 || (code == UNSPEC_LDS && !(ts & BEGIN_DATA))
6736 || code == UNSPEC_LDSA)
6737 gen_p = 0;
6739 mem = XVECEXP (mem, 0, 0);
6740 gcc_assert (MEM_P (mem));
6743 /* Source should be a mem ... */
6744 if (!MEM_P (mem))
6745 return -1;
6747 /* ... addressed by a register. */
6748 mem_reg = XEXP (mem, 0);
6749 if (!REG_P (mem_reg))
6750 return -1;
6752 /* We should use MEM's mode since REG's mode in presence of ZERO_EXTEND
6753 will always be DImode. */
6754 mode_no = ia64_mode_to_int (GET_MODE (mem));
6756 if (mode_no == SPEC_MODE_INVALID
6757 || (extend_p
6758 && !(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
6759 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST)))
6760 return -1;
6762 extract_insn_cached (insn);
6763 gcc_assert (reg == recog_data.operand[0] && mem == recog_data.operand[1]);
6765 *new_pat = ia64_gen_spec_insn (insn, ts, mode_no, gen_p != 0, extend_p);
6767 return gen_p;
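/* Illustrative sketch (not part of the original sources): given an
   ordinary load

	(set (reg:DI r14) (mem:DI (reg:DI r15)))

   a request for control speculation (TS containing BEGIN_CONTROL) is
   expected to hand back in NEW_PAT something along the lines of

	(set (reg:DI r14)
	     (unspec:DI [(mem:DI (reg:DI r15))] UNSPEC_LDS))

   i.e. the shape behind ld8.s, matching the UNSPEC forms this function
   recognizes when it is asked to re-speculate.  (Register names are
   made up for illustration.)  */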
6770 enum
6772 /* Offset to reach ZERO_EXTEND patterns. */
6773 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1,
6774 /* Number of patterns for each speculation mode. */
6775 SPEC_N = (SPEC_MODE_LAST
6776 + SPEC_MODE_FOR_EXTEND_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 2)
6779 enum SPEC_GEN_LD_MAP
6781 /* Offset to ld.a patterns. */
6782 SPEC_GEN_A = 0 * SPEC_N,
6783 /* Offset to ld.s patterns. */
6784 SPEC_GEN_S = 1 * SPEC_N,
6785 /* Offset to ld.sa patterns. */
6786 SPEC_GEN_SA = 2 * SPEC_N,
6787 /* Offset to ld.sa patterns. For these patterns the corresponding ld.c will
6788 mutate to chk.s. */
6789 SPEC_GEN_SA_FOR_S = 3 * SPEC_N
6792 /* These offsets are used to get (4 * SPEC_N). */
6793 enum SPEC_GEN_CHECK_OFFSET
6795 SPEC_GEN_CHKA_FOR_A_OFFSET = 4 * SPEC_N - SPEC_GEN_A,
6796 SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * SPEC_N - SPEC_GEN_SA
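/* Worked example (not part of the original sources), plugging in the
   values from enum SPEC_MODES:

	SPEC_GEN_EXTEND_OFFSET = 8 - 1 + 1 = 8
	SPEC_N                 = 8 + 3 - 1 + 2 = 12

   so each group in gen_load[] below holds 12 generators: 9 for the
   plain BImode ... TImode moves plus 3 zero-extending QI/HI/SImode
   variants, and e.g. a zero-extending QImode load lands at index
   1 + 8 = 9 within its group.  */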
6799 /* If GEN_P is true, calculate the index of the needed speculation check and
6800 return the speculative pattern for INSN with speculation mode TS, machine
6801 mode MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
6802 If GEN_P is false, just calculate the index of the needed speculation check. */
6803 static rtx
6804 ia64_gen_spec_insn (rtx insn, ds_t ts, int mode_no, bool gen_p, bool extend_p)
6806 rtx pat, new_pat;
6807 int load_no;
6808 int shift = 0;
6810 static rtx (* const gen_load[]) (rtx, rtx) = {
6811 gen_movbi_advanced,
6812 gen_movqi_advanced,
6813 gen_movhi_advanced,
6814 gen_movsi_advanced,
6815 gen_movdi_advanced,
6816 gen_movsf_advanced,
6817 gen_movdf_advanced,
6818 gen_movxf_advanced,
6819 gen_movti_advanced,
6820 gen_zero_extendqidi2_advanced,
6821 gen_zero_extendhidi2_advanced,
6822 gen_zero_extendsidi2_advanced,
6824 gen_movbi_speculative,
6825 gen_movqi_speculative,
6826 gen_movhi_speculative,
6827 gen_movsi_speculative,
6828 gen_movdi_speculative,
6829 gen_movsf_speculative,
6830 gen_movdf_speculative,
6831 gen_movxf_speculative,
6832 gen_movti_speculative,
6833 gen_zero_extendqidi2_speculative,
6834 gen_zero_extendhidi2_speculative,
6835 gen_zero_extendsidi2_speculative,
6837 gen_movbi_speculative_advanced,
6838 gen_movqi_speculative_advanced,
6839 gen_movhi_speculative_advanced,
6840 gen_movsi_speculative_advanced,
6841 gen_movdi_speculative_advanced,
6842 gen_movsf_speculative_advanced,
6843 gen_movdf_speculative_advanced,
6844 gen_movxf_speculative_advanced,
6845 gen_movti_speculative_advanced,
6846 gen_zero_extendqidi2_speculative_advanced,
6847 gen_zero_extendhidi2_speculative_advanced,
6848 gen_zero_extendsidi2_speculative_advanced,
6850 gen_movbi_speculative_advanced,
6851 gen_movqi_speculative_advanced,
6852 gen_movhi_speculative_advanced,
6853 gen_movsi_speculative_advanced,
6854 gen_movdi_speculative_advanced,
6855 gen_movsf_speculative_advanced,
6856 gen_movdf_speculative_advanced,
6857 gen_movxf_speculative_advanced,
6858 gen_movti_speculative_advanced,
6859 gen_zero_extendqidi2_speculative_advanced,
6860 gen_zero_extendhidi2_speculative_advanced,
6861 gen_zero_extendsidi2_speculative_advanced
6864 load_no = extend_p ? mode_no + SPEC_GEN_EXTEND_OFFSET : mode_no;
6866 if (ts & BEGIN_DATA)
6868 /* We don't need recovery because even if this is ld.sa,
6869 an ALAT entry will be allocated only if the NAT bit is set to zero.
6870 So it is enough to use ld.c here. */
6872 if (ts & BEGIN_CONTROL)
6874 load_no += SPEC_GEN_SA;
6876 if (!mflag_sched_ldc)
6877 shift = SPEC_GEN_CHKA_FOR_SA_OFFSET;
6879 else
6881 load_no += SPEC_GEN_A;
6883 if (!mflag_sched_ldc)
6884 shift = SPEC_GEN_CHKA_FOR_A_OFFSET;
6887 else if (ts & BEGIN_CONTROL)
6889 /* ld.sa can be used instead of ld.s to avoid basic block splitting. */
6890 if (!mflag_control_ldc)
6891 load_no += SPEC_GEN_S;
6892 else
6894 gcc_assert (mflag_sched_ldc);
6895 load_no += SPEC_GEN_SA_FOR_S;
6898 else
6899 gcc_unreachable ();
6901 /* Set the desired check index. We add 1 because a zero element in this
6902 array means that the insn with that uid is non-speculative. */
6903 spec_check_no[INSN_UID (insn)] = load_no + shift + 1;
6905 if (!gen_p)
6906 return 0;
6908 new_pat = gen_load[load_no] (copy_rtx (recog_data.operand[0]),
6909 copy_rtx (recog_data.operand[1]));
6911 pat = PATTERN (insn);
6912 if (GET_CODE (pat) == COND_EXEC)
6913 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx
6914 (COND_EXEC_TEST (pat)), new_pat);
6916 return new_pat;
6919 /* Offset to branchy checks. */
6920 enum { SPEC_GEN_CHECK_MUTATION_OFFSET = 5 * SPEC_N };
6922 /* Return nonzero, if INSN needs branchy recovery check. */
6923 static bool
6924 ia64_needs_block_p (rtx insn)
6926 int check_no;
6928 check_no = spec_check_no[INSN_UID(insn)] - 1;
6929 gcc_assert (0 <= check_no && check_no < SPEC_GEN_CHECK_MUTATION_OFFSET);
6931 return ((SPEC_GEN_S <= check_no && check_no < SPEC_GEN_S + SPEC_N)
6932 || (4 * SPEC_N <= check_no && check_no < 4 * SPEC_N + SPEC_N));
6935 /* Generate (or regenerate, if (MUTATE_P)) recovery check for INSN.
6936 If (LABEL != 0 || MUTATE_P), generate branchy recovery check.
6937 Otherwise, generate a simple check. */
6938 static rtx
6939 ia64_gen_check (rtx insn, rtx label, bool mutate_p)
6941 rtx op1, pat, check_pat;
6943 static rtx (* const gen_check[]) (rtx, rtx) = {
6944 gen_movbi_clr,
6945 gen_movqi_clr,
6946 gen_movhi_clr,
6947 gen_movsi_clr,
6948 gen_movdi_clr,
6949 gen_movsf_clr,
6950 gen_movdf_clr,
6951 gen_movxf_clr,
6952 gen_movti_clr,
6953 gen_zero_extendqidi2_clr,
6954 gen_zero_extendhidi2_clr,
6955 gen_zero_extendsidi2_clr,
6957 gen_speculation_check_bi,
6958 gen_speculation_check_qi,
6959 gen_speculation_check_hi,
6960 gen_speculation_check_si,
6961 gen_speculation_check_di,
6962 gen_speculation_check_sf,
6963 gen_speculation_check_df,
6964 gen_speculation_check_xf,
6965 gen_speculation_check_ti,
6966 gen_speculation_check_di,
6967 gen_speculation_check_di,
6968 gen_speculation_check_di,
6970 gen_movbi_clr,
6971 gen_movqi_clr,
6972 gen_movhi_clr,
6973 gen_movsi_clr,
6974 gen_movdi_clr,
6975 gen_movsf_clr,
6976 gen_movdf_clr,
6977 gen_movxf_clr,
6978 gen_movti_clr,
6979 gen_zero_extendqidi2_clr,
6980 gen_zero_extendhidi2_clr,
6981 gen_zero_extendsidi2_clr,
6983 gen_movbi_clr,
6984 gen_movqi_clr,
6985 gen_movhi_clr,
6986 gen_movsi_clr,
6987 gen_movdi_clr,
6988 gen_movsf_clr,
6989 gen_movdf_clr,
6990 gen_movxf_clr,
6991 gen_movti_clr,
6992 gen_zero_extendqidi2_clr,
6993 gen_zero_extendhidi2_clr,
6994 gen_zero_extendsidi2_clr,
6996 gen_advanced_load_check_clr_bi,
6997 gen_advanced_load_check_clr_qi,
6998 gen_advanced_load_check_clr_hi,
6999 gen_advanced_load_check_clr_si,
7000 gen_advanced_load_check_clr_di,
7001 gen_advanced_load_check_clr_sf,
7002 gen_advanced_load_check_clr_df,
7003 gen_advanced_load_check_clr_xf,
7004 gen_advanced_load_check_clr_ti,
7005 gen_advanced_load_check_clr_di,
7006 gen_advanced_load_check_clr_di,
7007 gen_advanced_load_check_clr_di,
7009 /* Following checks are generated during mutation. */
7010 gen_advanced_load_check_clr_bi,
7011 gen_advanced_load_check_clr_qi,
7012 gen_advanced_load_check_clr_hi,
7013 gen_advanced_load_check_clr_si,
7014 gen_advanced_load_check_clr_di,
7015 gen_advanced_load_check_clr_sf,
7016 gen_advanced_load_check_clr_df,
7017 gen_advanced_load_check_clr_xf,
7018 gen_advanced_load_check_clr_ti,
7019 gen_advanced_load_check_clr_di,
7020 gen_advanced_load_check_clr_di,
7021 gen_advanced_load_check_clr_di,
7023 0,0,0,0,0,0,0,0,0,0,0,0,
7025 gen_advanced_load_check_clr_bi,
7026 gen_advanced_load_check_clr_qi,
7027 gen_advanced_load_check_clr_hi,
7028 gen_advanced_load_check_clr_si,
7029 gen_advanced_load_check_clr_di,
7030 gen_advanced_load_check_clr_sf,
7031 gen_advanced_load_check_clr_df,
7032 gen_advanced_load_check_clr_xf,
7033 gen_advanced_load_check_clr_ti,
7034 gen_advanced_load_check_clr_di,
7035 gen_advanced_load_check_clr_di,
7036 gen_advanced_load_check_clr_di,
7038 gen_speculation_check_bi,
7039 gen_speculation_check_qi,
7040 gen_speculation_check_hi,
7041 gen_speculation_check_si,
7042 gen_speculation_check_di,
7043 gen_speculation_check_sf,
7044 gen_speculation_check_df,
7045 gen_speculation_check_xf,
7046 gen_speculation_check_ti,
7047 gen_speculation_check_di,
7048 gen_speculation_check_di,
7049 gen_speculation_check_di
7052 extract_insn_cached (insn);
7054 if (label)
7056 gcc_assert (mutate_p || ia64_needs_block_p (insn));
7057 op1 = label;
7059 else
7061 gcc_assert (!mutate_p && !ia64_needs_block_p (insn));
7062 op1 = copy_rtx (recog_data.operand[1]);
7065 if (mutate_p)
7066 /* INSN is ld.c.
7067 Find the speculation check number by searching for original
7068 speculative load in the RESOLVED_DEPS list of INSN.
7069 As long as patterns are unique for each instruction, this can be
7070 accomplished by matching ORIG_PAT fields. */
7072 dep_link_t link;
7073 int check_no = 0;
7074 rtx orig_pat = ORIG_PAT (insn);
7076 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (insn))
7078 rtx x = DEP_LINK_PRO (link);
7080 if (ORIG_PAT (x) == orig_pat)
7081 check_no = spec_check_no[INSN_UID (x)];
7083 gcc_assert (check_no);
7085 spec_check_no[INSN_UID (insn)] = (check_no
7086 + SPEC_GEN_CHECK_MUTATION_OFFSET);
7089 check_pat = (gen_check[spec_check_no[INSN_UID (insn)] - 1]
7090 (copy_rtx (recog_data.operand[0]), op1));
7092 pat = PATTERN (insn);
7093 if (GET_CODE (pat) == COND_EXEC)
7094 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7095 check_pat);
7097 return check_pat;
7100 /* Return nonzero, if X is branchy recovery check. */
7101 static int
7102 ia64_spec_check_p (rtx x)
7104 x = PATTERN (x);
7105 if (GET_CODE (x) == COND_EXEC)
7106 x = COND_EXEC_CODE (x);
7107 if (GET_CODE (x) == SET)
7108 return ia64_spec_check_src_p (SET_SRC (x));
7109 return 0;
7112 /* Return nonzero, if SRC belongs to recovery check. */
7113 static int
7114 ia64_spec_check_src_p (rtx src)
7116 if (GET_CODE (src) == IF_THEN_ELSE)
7118 rtx t;
7120 t = XEXP (src, 0);
7121 if (GET_CODE (t) == NE)
7123 t = XEXP (t, 0);
7125 if (GET_CODE (t) == UNSPEC)
7127 int code;
7129 code = XINT (t, 1);
7131 if (code == UNSPEC_CHKACLR
7132 || code == UNSPEC_CHKS
7133 || code == UNSPEC_LDCCLR)
7135 gcc_assert (code != 0);
7136 return code;
7141 return 0;
7145 /* The following page contains abstract data `bundle states' which are
7146 used for bundling insns (inserting nops and template generation). */
7148 /* The following describes state of insn bundling. */
7150 struct bundle_state
7152 /* Unique bundle state number to identify them in the debugging
7153 output */
7154 int unique_num;
7155 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7156 /* number of nops before and after the insn */
7157 short before_nops_num, after_nops_num;
7158 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
7159 insn) */
7160 int cost; /* cost of the state in cycles */
7161 int accumulated_insns_num; /* number of all previous insns including
7162 nops. L is considered as 2 insns */
7163 int branch_deviation; /* deviation of previous branches from 3rd slots */
7164 struct bundle_state *next; /* next state with the same insn_num */
7165 struct bundle_state *originator; /* originator (previous insn state) */
7166 /* All bundle states are in the following chain. */
7167 struct bundle_state *allocated_states_chain;
7168 /* The DFA State after issuing the insn and the nops. */
7169 state_t dfa_state;
7172 /* The following maps an insn number to the corresponding bundle state. */
7174 static struct bundle_state **index_to_bundle_states;
7176 /* The unique number of next bundle state. */
7178 static int bundle_states_num;
7180 /* All allocated bundle states are in the following chain. */
7182 static struct bundle_state *allocated_bundle_states_chain;
7184 /* All allocated but not used bundle states are in the following
7185 chain. */
7187 static struct bundle_state *free_bundle_state_chain;
7190 /* The following function returns a free bundle state. */
7192 static struct bundle_state *
7193 get_free_bundle_state (void)
7195 struct bundle_state *result;
7197 if (free_bundle_state_chain != NULL)
7199 result = free_bundle_state_chain;
7200 free_bundle_state_chain = result->next;
7202 else
7204 result = xmalloc (sizeof (struct bundle_state));
7205 result->dfa_state = xmalloc (dfa_state_size);
7206 result->allocated_states_chain = allocated_bundle_states_chain;
7207 allocated_bundle_states_chain = result;
7209 result->unique_num = bundle_states_num++;
7210 return result;
7214 /* The following function frees given bundle state. */
7216 static void
7217 free_bundle_state (struct bundle_state *state)
7219 state->next = free_bundle_state_chain;
7220 free_bundle_state_chain = state;
7223 /* Start work with abstract data `bundle states'. */
7225 static void
7226 initiate_bundle_states (void)
7228 bundle_states_num = 0;
7229 free_bundle_state_chain = NULL;
7230 allocated_bundle_states_chain = NULL;
7233 /* Finish work with abstract data `bundle states'. */
7235 static void
7236 finish_bundle_states (void)
7238 struct bundle_state *curr_state, *next_state;
7240 for (curr_state = allocated_bundle_states_chain;
7241 curr_state != NULL;
7242 curr_state = next_state)
7244 next_state = curr_state->allocated_states_chain;
7245 free (curr_state->dfa_state);
7246 free (curr_state);
7250 /* Hash table of the bundle states. The key is dfa_state and insn_num
7251 of the bundle states. */
7253 static htab_t bundle_state_table;
7255 /* The function returns hash of BUNDLE_STATE. */
7257 static unsigned
7258 bundle_state_hash (const void *bundle_state)
7260 const struct bundle_state *state = (struct bundle_state *) bundle_state;
7261 unsigned result, i;
7263 for (result = i = 0; i < dfa_state_size; i++)
7264 result += (((unsigned char *) state->dfa_state) [i]
7265 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7266 return result + state->insn_num;
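/* Worked example (not part of the original sources): for a purely
   hypothetical two-byte dfa_state of { 1, 2 } with insn_num == 5 and
   CHAR_BIT == 8, the hash above is

	(1 << ((0 % 8) * 3 + 8)) + (2 << ((1 % 8) * 3 + 8)) + 5
	  = 256 + 4096 + 5 = 4357

   Only equality of dfa_state and insn_num matters for correctness;
   the shifts merely spread the bytes across the hash value.  */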
7269 /* The function returns nonzero if the bundle state keys are equal. */
7271 static int
7272 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7274 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
7275 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
7277 return (state1->insn_num == state2->insn_num
7278 && memcmp (state1->dfa_state, state2->dfa_state,
7279 dfa_state_size) == 0);
7282 /* The function inserts the BUNDLE_STATE into the hash table. The
7283 function returns nonzero if the bundle has been inserted into the
7284 table. The table contains the best bundle state with given key. */
7286 static int
7287 insert_bundle_state (struct bundle_state *bundle_state)
7289 void **entry_ptr;
7291 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7292 if (*entry_ptr == NULL)
7294 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7295 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7296 *entry_ptr = (void *) bundle_state;
7297 return TRUE;
7299 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7300 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7301 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7302 > bundle_state->accumulated_insns_num
7303 || (((struct bundle_state *)
7304 *entry_ptr)->accumulated_insns_num
7305 == bundle_state->accumulated_insns_num
7306 && ((struct bundle_state *)
7307 *entry_ptr)->branch_deviation
7308 > bundle_state->branch_deviation))))
7311 struct bundle_state temp;
7313 temp = *(struct bundle_state *) *entry_ptr;
7314 *(struct bundle_state *) *entry_ptr = *bundle_state;
7315 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7316 *bundle_state = temp;
7318 return FALSE;
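/* Illustrative note (not part of the original sources): if two states
   share the same dfa_state and insn_num, the comparison above keeps the
   one with the smaller cost, breaking ties first by fewer accumulated
   insns (i.e. fewer nops) and then by smaller branch deviation; e.g. a
   2-cycle state replaces an equivalent 3-cycle one already in the
   table.  */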
7321 /* Start work with the hash table. */
7323 static void
7324 initiate_bundle_state_table (void)
7326 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7327 (htab_del) 0);
7330 /* Finish work with the hash table. */
7332 static void
7333 finish_bundle_state_table (void)
7335 htab_delete (bundle_state_table);
7340 /* The following variable is an insn `nop' used to check bundle states
7341 with different numbers of inserted nops. */
7343 static rtx ia64_nop;
7345 /* The following function tries to issue NOPS_NUM nops for the current
7346 state without advancing processor cycle. If it failed, the
7347 function returns FALSE and frees the current state. */
7349 static int
7350 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7352 int i;
7354 for (i = 0; i < nops_num; i++)
7355 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7357 free_bundle_state (curr_state);
7358 return FALSE;
7360 return TRUE;
7363 /* The following function tries to issue INSN for the current
7364 state without advancing processor cycle. If it failed, the
7365 function returns FALSE and frees the current state. */
7367 static int
7368 try_issue_insn (struct bundle_state *curr_state, rtx insn)
7370 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
7372 free_bundle_state (curr_state);
7373 return FALSE;
7375 return TRUE;
7378 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
7379 starting with ORIGINATOR without advancing the processor cycle. If
7380 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
7381 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
7382 If successful, the function creates a new bundle state and
7383 inserts it into the hash table and into `index_to_bundle_states'. */
7385 static void
7386 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
7387 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
7389 struct bundle_state *curr_state;
7391 curr_state = get_free_bundle_state ();
7392 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
7393 curr_state->insn = insn;
7394 curr_state->insn_num = originator->insn_num + 1;
7395 curr_state->cost = originator->cost;
7396 curr_state->originator = originator;
7397 curr_state->before_nops_num = before_nops_num;
7398 curr_state->after_nops_num = 0;
7399 curr_state->accumulated_insns_num
7400 = originator->accumulated_insns_num + before_nops_num;
7401 curr_state->branch_deviation = originator->branch_deviation;
7402 gcc_assert (insn);
7403 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
7405 gcc_assert (GET_MODE (insn) != TImode);
7406 if (!try_issue_nops (curr_state, before_nops_num))
7407 return;
7408 if (!try_issue_insn (curr_state, insn))
7409 return;
7410 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
7411 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
7412 && curr_state->accumulated_insns_num % 3 != 0)
7414 free_bundle_state (curr_state);
7415 return;
7418 else if (GET_MODE (insn) != TImode)
7420 if (!try_issue_nops (curr_state, before_nops_num))
7421 return;
7422 if (!try_issue_insn (curr_state, insn))
7423 return;
7424 curr_state->accumulated_insns_num++;
7425 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
7426 && asm_noperands (PATTERN (insn)) < 0);
7428 if (ia64_safe_type (insn) == TYPE_L)
7429 curr_state->accumulated_insns_num++;
7431 else
7433 /* If this is an insn that must be first in a group, then don't allow
7434 nops to be emitted before it. Currently, alloc is the only such
7435 supported instruction. */
7436 /* ??? The bundling automatons should handle this for us, but they do
7437 not yet have support for the first_insn attribute. */
7438 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
7440 free_bundle_state (curr_state);
7441 return;
7444 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
7445 state_transition (curr_state->dfa_state, NULL);
7446 curr_state->cost++;
7447 if (!try_issue_nops (curr_state, before_nops_num))
7448 return;
7449 if (!try_issue_insn (curr_state, insn))
7450 return;
7451 curr_state->accumulated_insns_num++;
7452 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7453 || asm_noperands (PATTERN (insn)) >= 0)
7455 /* Finish bundle containing asm insn. */
7456 curr_state->after_nops_num
7457 = 3 - curr_state->accumulated_insns_num % 3;
7458 curr_state->accumulated_insns_num
7459 += 3 - curr_state->accumulated_insns_num % 3;
7461 else if (ia64_safe_type (insn) == TYPE_L)
7462 curr_state->accumulated_insns_num++;
7464 if (ia64_safe_type (insn) == TYPE_B)
7465 curr_state->branch_deviation
7466 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
7467 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
7469 if (!only_bundle_end_p && insert_bundle_state (curr_state))
7471 state_t dfa_state;
7472 struct bundle_state *curr_state1;
7473 struct bundle_state *allocated_states_chain;
7475 curr_state1 = get_free_bundle_state ();
7476 dfa_state = curr_state1->dfa_state;
7477 allocated_states_chain = curr_state1->allocated_states_chain;
7478 *curr_state1 = *curr_state;
7479 curr_state1->dfa_state = dfa_state;
7480 curr_state1->allocated_states_chain = allocated_states_chain;
7481 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
7482 dfa_state_size);
7483 curr_state = curr_state1;
7485 if (!try_issue_nops (curr_state,
7486 3 - curr_state->accumulated_insns_num % 3))
7487 return;
7488 curr_state->after_nops_num
7489 = 3 - curr_state->accumulated_insns_num % 3;
7490 curr_state->accumulated_insns_num
7491 += 3 - curr_state->accumulated_insns_num % 3;
7493 if (!insert_bundle_state (curr_state))
7494 free_bundle_state (curr_state);
7495 return;
7498 /* The following function returns the position in the two-bundle window
7499 for the given STATE. */
7501 static int
7502 get_max_pos (state_t state)
7504 if (cpu_unit_reservation_p (state, pos_6))
7505 return 6;
7506 else if (cpu_unit_reservation_p (state, pos_5))
7507 return 5;
7508 else if (cpu_unit_reservation_p (state, pos_4))
7509 return 4;
7510 else if (cpu_unit_reservation_p (state, pos_3))
7511 return 3;
7512 else if (cpu_unit_reservation_p (state, pos_2))
7513 return 2;
7514 else if (cpu_unit_reservation_p (state, pos_1))
7515 return 1;
7516 else
7517 return 0;
7520 /* The function returns the code of a possible template for the given
7521 position and state. The function should be called only with the two
7522 position values 3 or 6. We avoid generating F NOPs by putting
7523 templates containing F insns at the end of the template search,
7524 because of an undocumented anomaly in McKinley-derived cores which can
7525 cause stalls if an F-unit insn (including a NOP) is issued within a
7526 six-cycle window after reading certain application registers (such
7527 as ar.bsp). Furthermore, power considerations also argue against
7528 the use of F-unit instructions unless they're really needed. */
7530 static int
7531 get_template (state_t state, int pos)
7533 switch (pos)
7535 case 3:
7536 if (cpu_unit_reservation_p (state, _0mmi_))
7537 return 1;
7538 else if (cpu_unit_reservation_p (state, _0mii_))
7539 return 0;
7540 else if (cpu_unit_reservation_p (state, _0mmb_))
7541 return 7;
7542 else if (cpu_unit_reservation_p (state, _0mib_))
7543 return 6;
7544 else if (cpu_unit_reservation_p (state, _0mbb_))
7545 return 5;
7546 else if (cpu_unit_reservation_p (state, _0bbb_))
7547 return 4;
7548 else if (cpu_unit_reservation_p (state, _0mmf_))
7549 return 3;
7550 else if (cpu_unit_reservation_p (state, _0mfi_))
7551 return 2;
7552 else if (cpu_unit_reservation_p (state, _0mfb_))
7553 return 8;
7554 else if (cpu_unit_reservation_p (state, _0mlx_))
7555 return 9;
7556 else
7557 gcc_unreachable ();
7558 case 6:
7559 if (cpu_unit_reservation_p (state, _1mmi_))
7560 return 1;
7561 else if (cpu_unit_reservation_p (state, _1mii_))
7562 return 0;
7563 else if (cpu_unit_reservation_p (state, _1mmb_))
7564 return 7;
7565 else if (cpu_unit_reservation_p (state, _1mib_))
7566 return 6;
7567 else if (cpu_unit_reservation_p (state, _1mbb_))
7568 return 5;
7569 else if (cpu_unit_reservation_p (state, _1bbb_))
7570 return 4;
7571 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
7572 return 3;
7573 else if (cpu_unit_reservation_p (state, _1mfi_))
7574 return 2;
7575 else if (cpu_unit_reservation_p (state, _1mfb_))
7576 return 8;
7577 else if (cpu_unit_reservation_p (state, _1mlx_))
7578 return 9;
7579 else
7580 gcc_unreachable ();
7581 default:
7582 gcc_unreachable ();
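/* Illustrative note (not part of the original sources): the codes
   returned above index bundle_name[], so for example

	get_template (state, 3) == 2   selects ".mfi" for the first bundle
	get_template (state, 6) == 9   selects ".mlx" for the second bundle

   assuming the corresponding _0mfi_ / _1mlx_ units are reserved in
   STATE.  */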
7586 /* The following function returns the next insn important for insn
7587 bundling, searching from INSN up to (but not including) TAIL. */
7589 static rtx
7590 get_next_important_insn (rtx insn, rtx tail)
7592 for (; insn && insn != tail; insn = NEXT_INSN (insn))
7593 if (INSN_P (insn)
7594 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7595 && GET_CODE (PATTERN (insn)) != USE
7596 && GET_CODE (PATTERN (insn)) != CLOBBER)
7597 return insn;
7598 return NULL_RTX;
7601 /* Add a bundle selector TEMPLATE0 before INSN. */
7603 static void
7604 ia64_add_bundle_selector_before (int template0, rtx insn)
7606 rtx b = gen_bundle_selector (GEN_INT (template0));
7608 ia64_emit_insn_before (b, insn);
7609 #if NR_BUNDLES == 10
7610 if ((template0 == 4 || template0 == 5)
7611 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
7613 int i;
7614 rtx note = NULL_RTX;
7616 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
7617 first or second slot. If it is and has REG_EH_NOTE set, copy it
7618 to following nops, as br.call sets rp to the address of following
7619 bundle and therefore an EH region end must be on a bundle
7620 boundary. */
7621 insn = PREV_INSN (insn);
7622 for (i = 0; i < 3; i++)
7625 insn = next_active_insn (insn);
7626 while (GET_CODE (insn) == INSN
7627 && get_attr_empty (insn) == EMPTY_YES);
7628 if (GET_CODE (insn) == CALL_INSN)
7629 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
7630 else if (note)
7632 int code;
7634 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
7635 || code == CODE_FOR_nop_b);
7636 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
7637 note = NULL_RTX;
7638 else
7639 REG_NOTES (insn)
7640 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
7641 REG_NOTES (insn));
7645 #endif
7648 /* The following function does insn bundling. Bundling means
7649 inserting templates and nop insns to fit insn groups into permitted
7650 templates. Instruction scheduling uses an NDFA (non-deterministic
7651 finite automaton) encoding information about the templates and the
7652 inserted nops. The nondeterminism of the automaton makes it possible
7653 to follow all possible insn sequences very quickly.
7655 Unfortunately it is not possible to get information about the inserted
7656 nop insns and the used templates from the automaton states. The
7657 automaton only says that we can issue an insn, possibly inserting
7658 some nops before it and using some template. Therefore insn
7659 bundling in this function is implemented by using a DFA
7660 (deterministic finite automaton). We follow all possible insn
7661 sequences by inserting 0-2 nops (that is what the NDFA describes for
7662 insn scheduling) before/after each insn being bundled. We know the
7663 start of a simulated processor cycle from insn scheduling (an insn
7664 starting a new cycle has TImode).
7666 A simple implementation of insn bundling would create an enormous
7667 number of possible insn sequences satisfying the information about
7668 new cycle ticks taken from the insn scheduling. To make the algorithm
7669 practical we use dynamic programming. Each decision (about
7670 inserting nops and implicitly about previous decisions) is described
7671 by structure bundle_state (see above). If we generate the same
7672 bundle state (the key is the automaton state after issuing the insns
7673 and nops for it), we reuse the already generated one. As a
7674 consequence we reject some decisions which cannot improve the
7675 solution and reduce the memory used by the algorithm.
7677 When we reach the end of the EBB (extended basic block), we choose
7678 the best sequence and then, moving back through the EBB, insert
7679 templates for the best alternative. The templates are obtained by
7680 querying the automaton state for each insn in the chosen bundle states.
7682 So the algorithm makes two (forward and backward) passes through the
7683 EBB. There is an additional forward pass through the EBB for the
7684 Itanium 1 processor. This pass inserts more nops to make the dependency
7685 between a producer insn and MMMUL/MMSHF at least 4 cycles long. */
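/* Editorial sketch (disabled, not part of GCC): a minimal illustration of
   the dynamic programming idea described above.  Partial bundling
   decisions are keyed here by (insn index, automaton state); when the
   same key is reached again, only the cheaper candidate is kept, so the
   search does not branch into an exponential number of sequences.  All
   names and sizes below are hypothetical.  */
#if 0
#define TOY_MAX_INSNS   16
#define TOY_MAX_STATES  64

struct toy_bundle_key
{
  int cost;	/* Best known cost of reaching this key.  */
  int valid;	/* Nonzero once this key has been reached at all.  */
};

static struct toy_bundle_key toy_table[TOY_MAX_INSNS + 1][TOY_MAX_STATES];

/* Record a candidate decision; return nonzero if it improved the key,
   i.e. if the caller should keep exploring from this state.  */
static int
toy_note_bundle_state (int insn_index, int dfa_state, int cost)
{
  struct toy_bundle_key *k = &toy_table[insn_index][dfa_state];

  if (!k->valid || cost < k->cost)
    {
      k->valid = 1;
      k->cost = cost;
      return 1;
    }
  return 0;	/* An equivalent, no-worse decision already exists.  */
}
#endif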
7687 static void
7688 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7690 struct bundle_state *curr_state, *next_state, *best_state;
7691 rtx insn, next_insn;
7692 int insn_num;
7693 int i, bundle_end_p, only_bundle_end_p, asm_p;
7694 int pos = 0, max_pos, template0, template1;
7695 rtx b;
7696 rtx nop;
7697 enum attr_type type;
7699 insn_num = 0;
7700 /* Count insns in the EBB. */
7701 for (insn = NEXT_INSN (prev_head_insn);
7702 insn && insn != tail;
7703 insn = NEXT_INSN (insn))
7704 if (INSN_P (insn))
7705 insn_num++;
7706 if (insn_num == 0)
7707 return;
7708 bundling_p = 1;
7709 dfa_clean_insn_cache ();
7710 initiate_bundle_state_table ();
7711 index_to_bundle_states = xmalloc ((insn_num + 2)
7712 * sizeof (struct bundle_state *));
7713 /* First (forward) pass -- generation of bundle states. */
7714 curr_state = get_free_bundle_state ();
7715 curr_state->insn = NULL;
7716 curr_state->before_nops_num = 0;
7717 curr_state->after_nops_num = 0;
7718 curr_state->insn_num = 0;
7719 curr_state->cost = 0;
7720 curr_state->accumulated_insns_num = 0;
7721 curr_state->branch_deviation = 0;
7722 curr_state->next = NULL;
7723 curr_state->originator = NULL;
7724 state_reset (curr_state->dfa_state);
7725 index_to_bundle_states [0] = curr_state;
7726 insn_num = 0;
7727 /* Shift the cycle mark if it is put on an insn which could be ignored. */
7728 for (insn = NEXT_INSN (prev_head_insn);
7729 insn != tail;
7730 insn = NEXT_INSN (insn))
7731 if (INSN_P (insn)
7732 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7733 || GET_CODE (PATTERN (insn)) == USE
7734 || GET_CODE (PATTERN (insn)) == CLOBBER)
7735 && GET_MODE (insn) == TImode)
7737 PUT_MODE (insn, VOIDmode);
7738 for (next_insn = NEXT_INSN (insn);
7739 next_insn != tail;
7740 next_insn = NEXT_INSN (next_insn))
7741 if (INSN_P (next_insn)
7742 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7743 && GET_CODE (PATTERN (next_insn)) != USE
7744 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7746 PUT_MODE (next_insn, TImode);
7747 break;
7750 /* Forward pass: generation of bundle states. */
7751 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7752 insn != NULL_RTX;
7753 insn = next_insn)
7755 gcc_assert (INSN_P (insn)
7756 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7757 && GET_CODE (PATTERN (insn)) != USE
7758 && GET_CODE (PATTERN (insn)) != CLOBBER);
7759 type = ia64_safe_type (insn);
7760 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7761 insn_num++;
7762 index_to_bundle_states [insn_num] = NULL;
7763 for (curr_state = index_to_bundle_states [insn_num - 1];
7764 curr_state != NULL;
7765 curr_state = next_state)
7767 pos = curr_state->accumulated_insns_num % 3;
7768 next_state = curr_state->next;
7769 /* We must fill up the current bundle in order to start a
7770 subsequent asm insn in a new bundle. An asm insn is always
7771 placed in a separate bundle. */
7772 only_bundle_end_p
7773 = (next_insn != NULL_RTX
7774 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7775 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7776 /* We may fill up the current bundle if it is the cycle end
7777 without a group barrier. */
7778 bundle_end_p
7779 = (only_bundle_end_p || next_insn == NULL_RTX
7780 || (GET_MODE (next_insn) == TImode
7781 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7782 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7783 || type == TYPE_S
7784 /* We need to insert 2 nops for cases like M_MII. To
7785 guarantee issuing all insns on the same cycle for
7786 Itanium 1, we need to issue 2 nops after the first M
7787 insn (MnnMII where n is a nop insn). */
7788 || ((type == TYPE_M || type == TYPE_A)
7789 && ia64_tune == PROCESSOR_ITANIUM
7790 && !bundle_end_p && pos == 1))
7791 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
7792 only_bundle_end_p);
7793 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
7794 only_bundle_end_p);
7795 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
7796 only_bundle_end_p);
7798 gcc_assert (index_to_bundle_states [insn_num]);
7799 for (curr_state = index_to_bundle_states [insn_num];
7800 curr_state != NULL;
7801 curr_state = curr_state->next)
7802 if (verbose >= 2 && dump)
7804 /* This structure is taken from the generated code of the
7805 pipeline hazard recognizer (see file insn-attrtab.c).
7806 Please don't forget to change the structure if a new
7807 automaton is added to the .md file. */
7808 struct DFA_chip
7810 unsigned short one_automaton_state;
7811 unsigned short oneb_automaton_state;
7812 unsigned short two_automaton_state;
7813 unsigned short twob_automaton_state;
7816 fprintf
7817 (dump,
7818 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7819 curr_state->unique_num,
7820 (curr_state->originator == NULL
7821 ? -1 : curr_state->originator->unique_num),
7822 curr_state->cost,
7823 curr_state->before_nops_num, curr_state->after_nops_num,
7824 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7825 (ia64_tune == PROCESSOR_ITANIUM
7826 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7827 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7828 INSN_UID (insn));
7832 /* We should find a solution because the 2nd insn scheduling has
7833 found one. */
7834 gcc_assert (index_to_bundle_states [insn_num]);
7835 /* Find a state corresponding to the best insn sequence. */
7836 best_state = NULL;
7837 for (curr_state = index_to_bundle_states [insn_num];
7838 curr_state != NULL;
7839 curr_state = curr_state->next)
7840 /* We consider only states whose last bundle is completely
7841 filled. Among those we prefer, first, insn sequences with
7842 minimal cost, then those with the fewest inserted nops, and
7843 finally those with branch insns placed in 3rd slots. */
7844 if (curr_state->accumulated_insns_num % 3 == 0
7845 && (best_state == NULL || best_state->cost > curr_state->cost
7846 || (best_state->cost == curr_state->cost
7847 && (curr_state->accumulated_insns_num
7848 < best_state->accumulated_insns_num
7849 || (curr_state->accumulated_insns_num
7850 == best_state->accumulated_insns_num
7851 && curr_state->branch_deviation
7852 < best_state->branch_deviation)))))
7853 best_state = curr_state;
7854 /* Second (backward) pass: adding nops and templates. */
7855 insn_num = best_state->before_nops_num;
7856 template0 = template1 = -1;
7857 for (curr_state = best_state;
7858 curr_state->originator != NULL;
7859 curr_state = curr_state->originator)
7861 insn = curr_state->insn;
7862 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7863 || asm_noperands (PATTERN (insn)) >= 0);
7864 insn_num++;
7865 if (verbose >= 2 && dump)
7867 struct DFA_chip
7869 unsigned short one_automaton_state;
7870 unsigned short oneb_automaton_state;
7871 unsigned short two_automaton_state;
7872 unsigned short twob_automaton_state;
7875 fprintf
7876 (dump,
7877 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7878 curr_state->unique_num,
7879 (curr_state->originator == NULL
7880 ? -1 : curr_state->originator->unique_num),
7881 curr_state->cost,
7882 curr_state->before_nops_num, curr_state->after_nops_num,
7883 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7884 (ia64_tune == PROCESSOR_ITANIUM
7885 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7886 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7887 INSN_UID (insn));
7889 /* Find the position in the current bundle window. The window can
7890 contain at most two bundles. A two-bundle window means that
7891 the processor will make two bundle rotations. */
7892 max_pos = get_max_pos (curr_state->dfa_state);
7893 if (max_pos == 6
7894 /* The following (negative template number) means that the
7895 processor did one bundle rotation. */
7896 || (max_pos == 3 && template0 < 0))
7898 /* We are at the end of the window -- find template(s) for
7899 its bundle(s). */
7900 pos = max_pos;
7901 if (max_pos == 3)
7902 template0 = get_template (curr_state->dfa_state, 3);
7903 else
7905 template1 = get_template (curr_state->dfa_state, 3);
7906 template0 = get_template (curr_state->dfa_state, 6);
7909 if (max_pos > 3 && template1 < 0)
7910 /* This may happen when there is a stop inside a bundle. */
7912 gcc_assert (pos <= 3);
7913 template1 = get_template (curr_state->dfa_state, 3);
7914 pos += 3;
7916 if (!asm_p)
7917 /* Emit nops after the current insn. */
7918 for (i = 0; i < curr_state->after_nops_num; i++)
7920 nop = gen_nop ();
7921 emit_insn_after (nop, insn);
7922 pos--;
7923 gcc_assert (pos >= 0);
7924 if (pos % 3 == 0)
7926 /* We are at the start of a bundle: emit the template
7927 (it should be defined). */
7928 gcc_assert (template0 >= 0);
7929 ia64_add_bundle_selector_before (template0, nop);
7930 /* If we have a two-bundle window, we make one bundle
7931 rotation. Otherwise template0 will be undefined
7932 (a negative value). */
7933 template0 = template1;
7934 template1 = -1;
7937 /* Move the position backward in the window. A group barrier has
7938 no slot; an asm insn takes up a whole bundle. */
7939 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7940 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7941 && asm_noperands (PATTERN (insn)) < 0)
7942 pos--;
7943 /* Long insn takes 2 slots. */
7944 if (ia64_safe_type (insn) == TYPE_L)
7945 pos--;
7946 gcc_assert (pos >= 0);
7947 if (pos % 3 == 0
7948 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7949 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7950 && asm_noperands (PATTERN (insn)) < 0)
7952 /* The current insn is at the bundle start: emit the
7953 template. */
7954 gcc_assert (template0 >= 0);
7955 ia64_add_bundle_selector_before (template0, insn);
7956 b = PREV_INSN (insn);
7957 insn = b;
7958 /* See comment above in analogous place for emitting nops
7959 after the insn. */
7960 template0 = template1;
7961 template1 = -1;
7963 /* Emit nops before the current insn. */
7964 for (i = 0; i < curr_state->before_nops_num; i++)
7966 nop = gen_nop ();
7967 ia64_emit_insn_before (nop, insn);
7968 nop = PREV_INSN (insn);
7969 insn = nop;
7970 pos--;
7971 gcc_assert (pos >= 0);
7972 if (pos % 3 == 0)
7974 /* See comment above in analogous place for emitting nops
7975 after the insn. */
7976 gcc_assert (template0 >= 0);
7977 ia64_add_bundle_selector_before (template0, insn);
7978 b = PREV_INSN (insn);
7979 insn = b;
7980 template0 = template1;
7981 template1 = -1;
7985 if (ia64_tune == PROCESSOR_ITANIUM)
7986 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
7987 Itanium 1 has a quirk: if the distance between an insn and a
7988 dependent MM-insn is less than 4 cycles, there is an additional
7989 6-cycle stall. So we make the distance equal to 4 cycles if it
7990 is less. */
7991 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7992 insn != NULL_RTX;
7993 insn = next_insn)
7995 gcc_assert (INSN_P (insn)
7996 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7997 && GET_CODE (PATTERN (insn)) != USE
7998 && GET_CODE (PATTERN (insn)) != CLOBBER);
7999 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8000 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8001 /* We found an MM-insn which needs additional cycles. */
8003 rtx last;
8004 int i, j, n;
8005 int pred_stop_p;
8007 /* Now we search for the template of the bundle in
8008 which the MM-insn is placed and for the position of the
8009 insn in the bundle (0, 1, or 2). We also check whether
8010 there is a stop before the insn. */
8011 last = prev_active_insn (insn);
8012 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8013 if (pred_stop_p)
8014 last = prev_active_insn (last);
8015 n = 0;
8016 for (;; last = prev_active_insn (last))
8017 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8019 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8020 if (template0 == 9)
8021 /* The insn is in an MLX bundle. Change the template
8022 to MFI because we will add nops before the insn;
8023 this simplifies the subsequent code a lot. */
8024 PATTERN (last)
8025 = gen_bundle_selector (const2_rtx); /* -> MFI */
8026 break;
8028 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8029 && (ia64_safe_itanium_class (last)
8030 != ITANIUM_CLASS_IGNORE))
8031 n++;
8032 /* Sanity checks: the stop is not at the
8033 bundle start, there are no more than 3 insns in the bundle,
8034 and the MM-insn is not at the start of a bundle with
8035 template MLX. */
8036 gcc_assert ((!pred_stop_p || n)
8037 && n <= 2
8038 && (template0 != 9 || !n));
8039 /* Fill the rest of the original bundle with nops (emitted just before the MM-insn, which will move to a new bundle below). */
8040 for (j = 3 - n; j > 0; j --)
8041 ia64_emit_insn_before (gen_nop (), insn);
8042 /* This takes into account that we will add N more nops
8043 before the insn later on -- please see the code below. */
8044 add_cycles [INSN_UID (insn)]--;
8045 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8046 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8047 insn);
8048 if (pred_stop_p)
8049 add_cycles [INSN_UID (insn)]--;
8050 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8052 /* Insert "MII;" template. */
8053 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8054 insn);
8055 ia64_emit_insn_before (gen_nop (), insn);
8056 ia64_emit_insn_before (gen_nop (), insn);
8057 if (i > 1)
8059 /* To decrease code size, we use "MI;I;"
8060 template. */
8061 ia64_emit_insn_before
8062 (gen_insn_group_barrier (GEN_INT (3)), insn);
8063 i--;
8065 ia64_emit_insn_before (gen_nop (), insn);
8066 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8067 insn);
8069 /* Put the MM-insn in the same slot of a bundle with the
8070 same template as the original one. */
8071 ia64_add_bundle_selector_before (template0, insn);
8072 /* To put the insn in the same slot, add the necessary number
8073 of nops. */
8074 for (j = n; j > 0; j --)
8075 ia64_emit_insn_before (gen_nop (), insn);
8076 /* Put the stop if the original bundle had it. */
8077 if (pred_stop_p)
8078 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8079 insn);
8082 free (index_to_bundle_states);
8083 finish_bundle_state_table ();
8084 bundling_p = 0;
8085 dfa_clean_insn_cache ();
8088 /* The following function is called at the end of scheduling BB or
8089 EBB. After reload, it inserts stop bits and does insn bundling. */
8091 static void
8092 ia64_sched_finish (FILE *dump, int sched_verbose)
8094 if (sched_verbose)
8095 fprintf (dump, "// Finishing schedule.\n");
8096 if (!reload_completed)
8097 return;
8098 if (reload_completed)
8100 final_emit_insn_group_barriers (dump);
8101 bundling (dump, sched_verbose, current_sched_info->prev_head,
8102 current_sched_info->next_tail);
8103 if (sched_verbose && dump)
8104 fprintf (dump, "// finishing %d-%d\n",
8105 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8106 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8108 return;
8112 /* The following function inserts stop bits in scheduled BB or EBB. */
8114 static void
8115 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8117 rtx insn;
8118 int need_barrier_p = 0;
8119 rtx prev_insn = NULL_RTX;
8121 init_insn_group_barriers ();
8123 for (insn = NEXT_INSN (current_sched_info->prev_head);
8124 insn != current_sched_info->next_tail;
8125 insn = NEXT_INSN (insn))
8127 if (GET_CODE (insn) == BARRIER)
8129 rtx last = prev_active_insn (insn);
8131 if (! last)
8132 continue;
8133 if (GET_CODE (last) == JUMP_INSN
8134 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8135 last = prev_active_insn (last);
8136 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8137 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8139 init_insn_group_barriers ();
8140 need_barrier_p = 0;
8141 prev_insn = NULL_RTX;
8143 else if (INSN_P (insn))
8145 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8147 init_insn_group_barriers ();
8148 need_barrier_p = 0;
8149 prev_insn = NULL_RTX;
8151 else if (need_barrier_p || group_barrier_needed (insn))
8153 if (TARGET_EARLY_STOP_BITS)
8155 rtx last;
8157 for (last = insn;
8158 last != current_sched_info->prev_head;
8159 last = PREV_INSN (last))
8160 if (INSN_P (last) && GET_MODE (last) == TImode
8161 && stops_p [INSN_UID (last)])
8162 break;
8163 if (last == current_sched_info->prev_head)
8164 last = insn;
8165 last = prev_active_insn (last);
8166 if (last
8167 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8168 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8169 last);
8170 init_insn_group_barriers ();
8171 for (last = NEXT_INSN (last);
8172 last != insn;
8173 last = NEXT_INSN (last))
8174 if (INSN_P (last))
8175 group_barrier_needed (last);
8177 else
8179 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8180 insn);
8181 init_insn_group_barriers ();
8183 group_barrier_needed (insn);
8184 prev_insn = NULL_RTX;
8186 else if (recog_memoized (insn) >= 0)
8187 prev_insn = insn;
8188 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8189 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8190 || asm_noperands (PATTERN (insn)) >= 0);
8197 /* The following function returns the lookahead depth to be used by
8198 first-cycle multipass DFA scheduling; a nonzero value enables it. */
8200 static int
8201 ia64_first_cycle_multipass_dfa_lookahead (void)
8203 return (reload_completed ? 6 : 4);
8206 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
8208 static void
8209 ia64_init_dfa_pre_cycle_insn (void)
8211 if (temp_dfa_state == NULL)
8213 dfa_state_size = state_size ();
8214 temp_dfa_state = xmalloc (dfa_state_size);
8215 prev_cycle_state = xmalloc (dfa_state_size);
8217 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8218 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8219 recog_memoized (dfa_pre_cycle_insn);
8220 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8221 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8222 recog_memoized (dfa_stop_insn);
8225 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8226 used by the DFA insn scheduler. */
8228 static rtx
8229 ia64_dfa_pre_cycle_insn (void)
8231 return dfa_pre_cycle_insn;
8234 /* The following function returns TRUE if PRODUCER (of type ilog or
8235 ld) produces an address for CONSUMER (of type st or stf). */
8238 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8240 rtx dest, reg, mem;
8242 gcc_assert (producer && consumer);
8243 dest = ia64_single_set (producer);
8244 gcc_assert (dest);
8245 reg = SET_DEST (dest);
8246 gcc_assert (reg);
8247 if (GET_CODE (reg) == SUBREG)
8248 reg = SUBREG_REG (reg);
8249 gcc_assert (GET_CODE (reg) == REG);
8251 dest = ia64_single_set (consumer);
8252 gcc_assert (dest);
8253 mem = SET_DEST (dest);
8254 gcc_assert (mem && GET_CODE (mem) == MEM);
8255 return reg_mentioned_p (reg, mem);
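/* Editorial example (not from the original sources): the store-address
   bypass above covers, roughly, an add producing r14 whose result is
   immediately used as the address of a store, e.g.
       add r14 = r32, r33 ;;  st8 [r14] = r35
   The register numbers are made up for illustration.  */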
8258 /* The following function returns TRUE if PRODUCER (of type ilog or
8259 ld) produces an address for CONSUMER (of type ld or fld). */
8262 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8264 rtx dest, src, reg, mem;
8266 gcc_assert (producer && consumer);
8267 dest = ia64_single_set (producer);
8268 gcc_assert (dest);
8269 reg = SET_DEST (dest);
8270 gcc_assert (reg);
8271 if (GET_CODE (reg) == SUBREG)
8272 reg = SUBREG_REG (reg);
8273 gcc_assert (GET_CODE (reg) == REG);
8275 src = ia64_single_set (consumer);
8276 gcc_assert (src);
8277 mem = SET_SRC (src);
8278 gcc_assert (mem);
8280 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
8281 mem = XVECEXP (mem, 0, 0);
8282 else if (GET_CODE (mem) == IF_THEN_ELSE)
8283 /* ??? Is this bypass necessary for ld.c? */
8285 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
8286 mem = XEXP (mem, 1);
8289 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
8290 mem = XEXP (mem, 0);
8292 if (GET_CODE (mem) == UNSPEC)
8294 int c = XINT (mem, 1);
8296 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDSA);
8297 mem = XVECEXP (mem, 0, 0);
8300 /* Note that LO_SUM is used for GOT loads. */
8301 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
8303 return reg_mentioned_p (reg, mem);
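/* Editorial example (not from the original sources): the load-address
   bypass above covers, roughly, an add producing r14 whose result is
   immediately used as the address of a load, e.g.
       add r14 = r32, r33 ;;  ld8 r35 = [r14]
   The register numbers are made up for illustration.  */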
8306 /* The following function returns TRUE if INSN produces an address
8307 for a load/store insn. We will place such insns into an M slot
8308 because that decreases their latency. */
8311 ia64_produce_address_p (rtx insn)
8313 return insn->call;
8317 /* Emit pseudo-ops for the assembler to describe predicate relations.
8318 At present this assumes that we only consider predicate pairs to
8319 be mutex, and that the assembler can deduce proper values from
8320 straight-line code. */
8322 static void
8323 emit_predicate_relation_info (void)
8325 basic_block bb;
8327 FOR_EACH_BB_REVERSE (bb)
8329 int r;
8330 rtx head = BB_HEAD (bb);
8332 /* We only need such notes at code labels. */
8333 if (GET_CODE (head) != CODE_LABEL)
8334 continue;
8335 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
8336 head = NEXT_INSN (head);
8338 /* Skip p0, which may be thought to be live due to (reg:DI p0)
8339 grabbing the entire block of predicate registers. */
8340 for (r = PR_REG (2); r < PR_REG (64); r += 2)
8341 if (REGNO_REG_SET_P (bb->il.rtl->global_live_at_start, r))
8343 rtx p = gen_rtx_REG (BImode, r);
8344 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
8345 if (head == BB_END (bb))
8346 BB_END (bb) = n;
8347 head = n;
8351 /* Look for conditional calls that do not return, and protect predicate
8352 relations around them. Otherwise the assembler will assume the call
8353 returns, and complain about uses of call-clobbered predicates after
8354 the call. */
8355 FOR_EACH_BB_REVERSE (bb)
8357 rtx insn = BB_HEAD (bb);
8359 while (1)
8361 if (GET_CODE (insn) == CALL_INSN
8362 && GET_CODE (PATTERN (insn)) == COND_EXEC
8363 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
8365 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
8366 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
8367 if (BB_HEAD (bb) == insn)
8368 BB_HEAD (bb) = b;
8369 if (BB_END (bb) == insn)
8370 BB_END (bb) = a;
8373 if (insn == BB_END (bb))
8374 break;
8375 insn = NEXT_INSN (insn);
8380 /* Perform machine dependent operations on the rtl chain INSNS. */
8382 static void
8383 ia64_reorg (void)
8385 /* We are freeing block_for_insn in the toplev to keep compatibility
8386 with old MDEP_REORGS that are not CFG based. Recompute it now. */
8387 compute_bb_for_insn ();
8389 /* If optimizing, we'll have split before scheduling. */
8390 if (optimize == 0)
8391 split_all_insns (0);
8393 /* ??? update_life_info_in_dirty_blocks fails to terminate during
8394 non-optimizing bootstrap. */
8395 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
8397 if (optimize && ia64_flag_schedule_insns2)
8399 timevar_push (TV_SCHED2);
8400 ia64_final_schedule = 1;
8402 initiate_bundle_states ();
8403 ia64_nop = make_insn_raw (gen_nop ());
8404 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
8405 recog_memoized (ia64_nop);
8406 clocks_length = get_max_uid () + 1;
8407 stops_p = xcalloc (1, clocks_length);
8408 if (ia64_tune == PROCESSOR_ITANIUM)
8410 clocks = xcalloc (clocks_length, sizeof (int));
8411 add_cycles = xcalloc (clocks_length, sizeof (int));
8413 if (ia64_tune == PROCESSOR_ITANIUM2)
8415 pos_1 = get_cpu_unit_code ("2_1");
8416 pos_2 = get_cpu_unit_code ("2_2");
8417 pos_3 = get_cpu_unit_code ("2_3");
8418 pos_4 = get_cpu_unit_code ("2_4");
8419 pos_5 = get_cpu_unit_code ("2_5");
8420 pos_6 = get_cpu_unit_code ("2_6");
8421 _0mii_ = get_cpu_unit_code ("2b_0mii.");
8422 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
8423 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
8424 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
8425 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
8426 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
8427 _0mib_ = get_cpu_unit_code ("2b_0mib.");
8428 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
8429 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
8430 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
8431 _1mii_ = get_cpu_unit_code ("2b_1mii.");
8432 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
8433 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
8434 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
8435 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
8436 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
8437 _1mib_ = get_cpu_unit_code ("2b_1mib.");
8438 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
8439 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
8440 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
8442 else
8444 pos_1 = get_cpu_unit_code ("1_1");
8445 pos_2 = get_cpu_unit_code ("1_2");
8446 pos_3 = get_cpu_unit_code ("1_3");
8447 pos_4 = get_cpu_unit_code ("1_4");
8448 pos_5 = get_cpu_unit_code ("1_5");
8449 pos_6 = get_cpu_unit_code ("1_6");
8450 _0mii_ = get_cpu_unit_code ("1b_0mii.");
8451 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
8452 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
8453 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
8454 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
8455 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
8456 _0mib_ = get_cpu_unit_code ("1b_0mib.");
8457 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
8458 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
8459 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
8460 _1mii_ = get_cpu_unit_code ("1b_1mii.");
8461 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
8462 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
8463 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
8464 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
8465 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
8466 _1mib_ = get_cpu_unit_code ("1b_1mib.");
8467 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
8468 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
8469 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
8471 schedule_ebbs ();
8472 finish_bundle_states ();
8473 if (ia64_tune == PROCESSOR_ITANIUM)
8475 free (add_cycles);
8476 free (clocks);
8478 free (stops_p);
8479 stops_p = NULL;
8480 emit_insn_group_barriers (dump_file);
8482 ia64_final_schedule = 0;
8483 timevar_pop (TV_SCHED2);
8485 else
8486 emit_all_insn_group_barriers (dump_file);
8488 /* A call must not be the last instruction in a function, so that the
8489 return address is still within the function and unwinding works
8490 properly. Note that IA-64 differs from dwarf2 on this point. */
8491 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8493 rtx insn;
8494 int saw_stop = 0;
8496 insn = get_last_insn ();
8497 if (! INSN_P (insn))
8498 insn = prev_active_insn (insn);
8499 /* Skip over insns that expand to nothing. */
8500 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
8502 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
8503 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
8504 saw_stop = 1;
8505 insn = prev_active_insn (insn);
8507 if (GET_CODE (insn) == CALL_INSN)
8509 if (! saw_stop)
8510 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8511 emit_insn (gen_break_f ());
8512 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8516 emit_predicate_relation_info ();
8518 if (ia64_flag_var_tracking)
8520 timevar_push (TV_VAR_TRACKING);
8521 variable_tracking_main ();
8522 timevar_pop (TV_VAR_TRACKING);
8526 /* Return true if REGNO is used by the epilogue. */
8529 ia64_epilogue_uses (int regno)
8531 switch (regno)
8533 case R_GR (1):
8534 /* With a call to a function in another module, we will write a new
8535 value to "gp". After returning from such a call, we need to make
8536 sure the function restores the original gp-value, even if the
8537 function itself does not use the gp anymore. */
8538 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
8540 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
8541 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
8542 /* For functions defined with the syscall_linkage attribute, all
8543 input registers are marked as live at all function exits. This
8544 prevents the register allocator from using the input registers,
8545 which in turn makes it possible to restart a system call after
8546 an interrupt without having to save/restore the input registers.
8547 This also prevents kernel data from leaking to application code. */
8548 return lookup_attribute ("syscall_linkage",
8549 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
8551 case R_BR (0):
8552 /* Conditional return patterns can't represent the use of `b0' as
8553 the return address, so we force the value live this way. */
8554 return 1;
8556 case AR_PFS_REGNUM:
8557 /* Likewise for ar.pfs, which is used by br.ret. */
8558 return 1;
8560 default:
8561 return 0;
8565 /* Return true if REGNO is used by the frame unwinder. */
8568 ia64_eh_uses (int regno)
8570 if (! reload_completed)
8571 return 0;
8573 if (current_frame_info.reg_save_b0
8574 && regno == current_frame_info.reg_save_b0)
8575 return 1;
8576 if (current_frame_info.reg_save_pr
8577 && regno == current_frame_info.reg_save_pr)
8578 return 1;
8579 if (current_frame_info.reg_save_ar_pfs
8580 && regno == current_frame_info.reg_save_ar_pfs)
8581 return 1;
8582 if (current_frame_info.reg_save_ar_unat
8583 && regno == current_frame_info.reg_save_ar_unat)
8584 return 1;
8585 if (current_frame_info.reg_save_ar_lc
8586 && regno == current_frame_info.reg_save_ar_lc)
8587 return 1;
8589 return 0;
8592 /* Return true if this goes in small data/bss. */
8594 /* ??? We could also support our own long data here, generating
8595 movl/add/ld8 instead of addl,ld8/ld8. This makes the code bigger, but
8596 should make the code faster because there is one less load. This
8597 would also cover incomplete types, which can't go in sdata/sbss. */
8599 static bool
8600 ia64_in_small_data_p (tree exp)
8602 if (TARGET_NO_SDATA)
8603 return false;
8605 /* We want to merge strings, so we never consider them small data. */
8606 if (TREE_CODE (exp) == STRING_CST)
8607 return false;
8609 /* Functions are never small data. */
8610 if (TREE_CODE (exp) == FUNCTION_DECL)
8611 return false;
8613 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
8615 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
8617 if (strcmp (section, ".sdata") == 0
8618 || strncmp (section, ".sdata.", 7) == 0
8619 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
8620 || strcmp (section, ".sbss") == 0
8621 || strncmp (section, ".sbss.", 6) == 0
8622 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
8623 return true;
8625 else
8627 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
8629 /* If this is an incomplete type with size 0, then we can't put it
8630 in sdata because it might be too big when completed. */
8631 if (size > 0 && size <= ia64_section_threshold)
8632 return true;
8635 return false;
8638 /* Output assembly directives for prologue regions. */
8640 /* The current basic block number. */
8642 static bool last_block;
8644 /* True if we need a copy_state command at the start of the next block. */
8646 static bool need_copy_state;
8648 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8649 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8650 #endif
8652 /* Emit a debugging label after a call-frame-related insn. We'd
8653 rather output the label right away, but we'd have to output it
8654 after, not before, the instruction, and the instruction has not
8655 been output yet. So we emit the label after the insn, delete it to
8656 avoid introducing basic blocks, and mark it as preserved, such that
8657 it is still output, given that it is referenced in debug info. */
8659 static const char *
8660 ia64_emit_deleted_label_after_insn (rtx insn)
8662 char label[MAX_ARTIFICIAL_LABEL_BYTES];
8663 rtx lb = gen_label_rtx ();
8664 rtx label_insn = emit_label_after (lb, insn);
8666 LABEL_PRESERVE_P (lb) = 1;
8668 delete_insn (label_insn);
8670 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
8672 return xstrdup (label);
8675 /* Define the CFA after INSN with the steady-state definition. */
8677 static void
8678 ia64_dwarf2out_def_steady_cfa (rtx insn)
8680 rtx fp = frame_pointer_needed
8681 ? hard_frame_pointer_rtx
8682 : stack_pointer_rtx;
8684 dwarf2out_def_cfa
8685 (ia64_emit_deleted_label_after_insn (insn),
8686 REGNO (fp),
8687 ia64_initial_elimination_offset
8688 (REGNO (arg_pointer_rtx), REGNO (fp))
8689 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8692 /* The generic dwarf2 frame debug info generator does not define a
8693 separate region for the very end of the epilogue, so refrain from
8694 doing so in the IA64-specific code as well. */
8696 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8698 /* The function emits unwind directives for the start of an epilogue. */
8700 static void
8701 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8703 /* If this isn't the last block of the function, then we need to label the
8704 current state, and copy it back in at the start of the next block. */
8706 if (!last_block)
8708 if (unwind)
8709 fprintf (asm_out_file, "\t.label_state %d\n",
8710 ++cfun->machine->state_num);
8711 need_copy_state = true;
8714 if (unwind)
8715 fprintf (asm_out_file, "\t.restore sp\n");
8716 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8717 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8718 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
8721 /* This function processes a SET pattern, looking for the specific
8722 patterns which require emitting an assembly directive for unwinding. */
8724 static int
8725 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8727 rtx src = SET_SRC (pat);
8728 rtx dest = SET_DEST (pat);
8729 int src_regno, dest_regno;
8731 /* Look for the ALLOC insn. */
8732 if (GET_CODE (src) == UNSPEC_VOLATILE
8733 && XINT (src, 1) == UNSPECV_ALLOC
8734 && GET_CODE (dest) == REG)
8736 dest_regno = REGNO (dest);
8738 /* If this is the final destination for ar.pfs, then this must
8739 be the alloc in the prologue. */
8740 if (dest_regno == current_frame_info.reg_save_ar_pfs)
8742 if (unwind)
8743 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8744 ia64_dbx_register_number (dest_regno));
8746 else
8748 /* This must be an alloc before a sibcall. We must drop the
8749 old frame info. The easiest way to drop the old frame
8750 info is to ensure we had a ".restore sp" directive
8751 followed by a new prologue. If the procedure doesn't
8752 have a memory-stack frame, we'll issue a dummy ".restore
8753 sp" now. */
8754 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8755 /* If we haven't done process_epilogue () yet, do it now. */
8756 process_epilogue (asm_out_file, insn, unwind, frame);
8757 if (unwind)
8758 fprintf (asm_out_file, "\t.prologue\n");
8760 return 1;
8763 /* Look for SP = .... */
8764 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8766 if (GET_CODE (src) == PLUS)
8768 rtx op0 = XEXP (src, 0);
8769 rtx op1 = XEXP (src, 1);
8771 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8773 if (INTVAL (op1) < 0)
8775 gcc_assert (!frame_pointer_needed);
8776 if (unwind)
8777 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8778 -INTVAL (op1));
8779 if (frame)
8780 ia64_dwarf2out_def_steady_cfa (insn);
8782 else
8783 process_epilogue (asm_out_file, insn, unwind, frame);
8785 else
8787 gcc_assert (GET_CODE (src) == REG
8788 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8789 process_epilogue (asm_out_file, insn, unwind, frame);
8792 return 1;
8795 /* Register move we need to look at. */
8796 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
8798 src_regno = REGNO (src);
8799 dest_regno = REGNO (dest);
8801 switch (src_regno)
8803 case BR_REG (0):
8804 /* Saving return address pointer. */
8805 gcc_assert (dest_regno == current_frame_info.reg_save_b0);
8806 if (unwind)
8807 fprintf (asm_out_file, "\t.save rp, r%d\n",
8808 ia64_dbx_register_number (dest_regno));
8809 return 1;
8811 case PR_REG (0):
8812 gcc_assert (dest_regno == current_frame_info.reg_save_pr);
8813 if (unwind)
8814 fprintf (asm_out_file, "\t.save pr, r%d\n",
8815 ia64_dbx_register_number (dest_regno));
8816 return 1;
8818 case AR_UNAT_REGNUM:
8819 gcc_assert (dest_regno == current_frame_info.reg_save_ar_unat);
8820 if (unwind)
8821 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
8822 ia64_dbx_register_number (dest_regno));
8823 return 1;
8825 case AR_LC_REGNUM:
8826 gcc_assert (dest_regno == current_frame_info.reg_save_ar_lc);
8827 if (unwind)
8828 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
8829 ia64_dbx_register_number (dest_regno));
8830 return 1;
8832 case STACK_POINTER_REGNUM:
8833 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
8834 && frame_pointer_needed);
8835 if (unwind)
8836 fprintf (asm_out_file, "\t.vframe r%d\n",
8837 ia64_dbx_register_number (dest_regno));
8838 if (frame)
8839 ia64_dwarf2out_def_steady_cfa (insn);
8840 return 1;
8842 default:
8843 /* Everything else should indicate being stored to memory. */
8844 gcc_unreachable ();
8848 /* Memory store we need to look at. */
8849 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
8851 long off;
8852 rtx base;
8853 const char *saveop;
8855 if (GET_CODE (XEXP (dest, 0)) == REG)
8857 base = XEXP (dest, 0);
8858 off = 0;
8860 else
8862 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
8863 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
8864 base = XEXP (XEXP (dest, 0), 0);
8865 off = INTVAL (XEXP (XEXP (dest, 0), 1));
8868 if (base == hard_frame_pointer_rtx)
8870 saveop = ".savepsp";
8871 off = - off;
8873 else
8875 gcc_assert (base == stack_pointer_rtx);
8876 saveop = ".savesp";
8879 src_regno = REGNO (src);
8880 switch (src_regno)
8882 case BR_REG (0):
8883 gcc_assert (!current_frame_info.reg_save_b0);
8884 if (unwind)
8885 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
8886 return 1;
8888 case PR_REG (0):
8889 gcc_assert (!current_frame_info.reg_save_pr);
8890 if (unwind)
8891 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
8892 return 1;
8894 case AR_LC_REGNUM:
8895 gcc_assert (!current_frame_info.reg_save_ar_lc);
8896 if (unwind)
8897 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
8898 return 1;
8900 case AR_PFS_REGNUM:
8901 gcc_assert (!current_frame_info.reg_save_ar_pfs);
8902 if (unwind)
8903 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
8904 return 1;
8906 case AR_UNAT_REGNUM:
8907 gcc_assert (!current_frame_info.reg_save_ar_unat);
8908 if (unwind)
8909 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
8910 return 1;
8912 case GR_REG (4):
8913 case GR_REG (5):
8914 case GR_REG (6):
8915 case GR_REG (7):
8916 if (unwind)
8917 fprintf (asm_out_file, "\t.save.g 0x%x\n",
8918 1 << (src_regno - GR_REG (4)));
8919 return 1;
8921 case BR_REG (1):
8922 case BR_REG (2):
8923 case BR_REG (3):
8924 case BR_REG (4):
8925 case BR_REG (5):
8926 if (unwind)
8927 fprintf (asm_out_file, "\t.save.b 0x%x\n",
8928 1 << (src_regno - BR_REG (1)));
8929 return 1;
8931 case FR_REG (2):
8932 case FR_REG (3):
8933 case FR_REG (4):
8934 case FR_REG (5):
8935 if (unwind)
8936 fprintf (asm_out_file, "\t.save.f 0x%x\n",
8937 1 << (src_regno - FR_REG (2)));
8938 return 1;
8940 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
8941 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
8942 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
8943 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
8944 if (unwind)
8945 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
8946 1 << (src_regno - FR_REG (12)));
8947 return 1;
8949 default:
8950 return 0;
8954 return 0;
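/* Editorial examples (not from the original sources) of the mapping
   implemented above; the directive text comes from the fprintf calls,
   while the register numbers and offsets are made up:
       r35 = b0 (rp saved to a register)   ->  .save rp, r35
       sp = sp + (-192)                    ->  .fframe 192
       sp = hard frame pointer             ->  .restore sp
       [sp + 16] = b0 (rp saved to memory) ->  .savesp rp, 16  */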
8958 /* This function looks at a single insn and emits any directives
8959 required to unwind this insn. */
8960 void
8961 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
8963 bool unwind = (flag_unwind_tables
8964 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
8965 bool frame = dwarf2out_do_frame ();
8967 if (unwind || frame)
8969 rtx pat;
8971 if (NOTE_INSN_BASIC_BLOCK_P (insn))
8973 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
8975 /* Restore unwind state from immediately before the epilogue. */
8976 if (need_copy_state)
8978 if (unwind)
8980 fprintf (asm_out_file, "\t.body\n");
8981 fprintf (asm_out_file, "\t.copy_state %d\n",
8982 cfun->machine->state_num);
8984 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8985 ia64_dwarf2out_def_steady_cfa (insn);
8986 need_copy_state = false;
8990 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
8991 return;
8993 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8994 if (pat)
8995 pat = XEXP (pat, 0);
8996 else
8997 pat = PATTERN (insn);
8999 switch (GET_CODE (pat))
9001 case SET:
9002 process_set (asm_out_file, pat, insn, unwind, frame);
9003 break;
9005 case PARALLEL:
9007 int par_index;
9008 int limit = XVECLEN (pat, 0);
9009 for (par_index = 0; par_index < limit; par_index++)
9011 rtx x = XVECEXP (pat, 0, par_index);
9012 if (GET_CODE (x) == SET)
9013 process_set (asm_out_file, x, insn, unwind, frame);
9015 break;
9018 default:
9019 gcc_unreachable ();
9025 enum ia64_builtins
9027 IA64_BUILTIN_BSP,
9028 IA64_BUILTIN_FLUSHRS
9031 void
9032 ia64_init_builtins (void)
9034 tree fpreg_type;
9035 tree float80_type;
9037 /* The __fpreg type. */
9038 fpreg_type = make_node (REAL_TYPE);
9039 TYPE_PRECISION (fpreg_type) = 82;
9040 layout_type (fpreg_type);
9041 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9043 /* The __float80 type. */
9044 float80_type = make_node (REAL_TYPE);
9045 TYPE_PRECISION (float80_type) = 80;
9046 layout_type (float80_type);
9047 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9049 /* The __float128 type. */
9050 if (!TARGET_HPUX)
9052 tree float128_type = make_node (REAL_TYPE);
9053 TYPE_PRECISION (float128_type) = 128;
9054 layout_type (float128_type);
9055 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9057 else
9058 /* Under HPUX, this is a synonym for "long double". */
9059 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9060 "__float128");
9062 #define def_builtin(name, type, code) \
9063 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9064 NULL, NULL_TREE)
9066 def_builtin ("__builtin_ia64_bsp",
9067 build_function_type (ptr_type_node, void_list_node),
9068 IA64_BUILTIN_BSP);
9070 def_builtin ("__builtin_ia64_flushrs",
9071 build_function_type (void_type_node, void_list_node),
9072 IA64_BUILTIN_FLUSHRS);
9074 #undef def_builtin
9076 if (TARGET_HPUX)
9078 if (built_in_decls [BUILT_IN_FINITE])
9079 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9080 "_Isfinite");
9081 if (built_in_decls [BUILT_IN_FINITEF])
9082 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9083 "_Isfinitef");
9084 if (built_in_decls [BUILT_IN_FINITEL])
9085 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9086 "_Isfinitef128");
9091 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9092 enum machine_mode mode ATTRIBUTE_UNUSED,
9093 int ignore ATTRIBUTE_UNUSED)
9095 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9096 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9098 switch (fcode)
9100 case IA64_BUILTIN_BSP:
9101 if (! target || ! register_operand (target, DImode))
9102 target = gen_reg_rtx (DImode);
9103 emit_insn (gen_bsp_value (target));
9104 #ifdef POINTERS_EXTEND_UNSIGNED
9105 target = convert_memory_address (ptr_mode, target);
9106 #endif
9107 return target;
9109 case IA64_BUILTIN_FLUSHRS:
9110 emit_insn (gen_flushrs ());
9111 return const0_rtx;
9113 default:
9114 break;
9117 return NULL_RTX;
9120 /* On HP-UX IA64, aggregate parameters are passed in the
9121 most significant bits of the stack slot. */
9123 enum direction
9124 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
9126 /* Exception to normal case for structures/unions/etc. */
9128 if (type && AGGREGATE_TYPE_P (type)
9129 && int_size_in_bytes (type) < UNITS_PER_WORD)
9130 return upward;
9132 /* Fall back to the default. */
9133 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9136 /* Emit text to declare externally defined variables and functions, because
9137 the Intel assembler does not support undefined externals. */
9139 void
9140 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9142 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9143 set in order to avoid putting out names that are never really
9144 used. */
9145 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9147 /* maybe_assemble_visibility will return 1 if the assembler
9148 visibility directive is output. */
9149 int need_visibility = ((*targetm.binds_local_p) (decl)
9150 && maybe_assemble_visibility (decl));
9152 /* GNU as does not need anything here, but the HP linker does
9153 need something for external functions. */
9154 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9155 && TREE_CODE (decl) == FUNCTION_DECL)
9156 (*targetm.asm_out.globalize_decl_name) (file, decl);
9157 else if (need_visibility && !TARGET_GNU_AS)
9158 (*targetm.asm_out.globalize_label) (file, name);
9162 /* Set the SImode div/mod functions; init_integral_libfuncs only
9163 initializes modes of word_mode and larger. Rename the TFmode libfuncs
9164 using the HPUX conventions. __divtf3 is used for XFmode; we need to
9165 keep it for backward compatibility. */
9167 static void
9168 ia64_init_libfuncs (void)
9170 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9171 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9172 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9173 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9175 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9176 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9177 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9178 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9179 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9181 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9182 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9183 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9184 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9185 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9186 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9188 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9189 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9190 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9191 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9192 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9194 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9195 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9196 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9197 /* HP-UX 11.23 libc does not have a function for unsigned
9198 SImode-to-TFmode conversion. */
9199 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
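/* Editorial note (not from the original sources): as a concrete effect of
   the renaming above, a TFmode multiplication calls _U_Qfmpy instead of
   the default __multf3, and SImode division calls __divsi3, which
   init_integral_libfuncs would not otherwise set up here.  */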
9202 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9204 static void
9205 ia64_hpux_init_libfuncs (void)
9207 ia64_init_libfuncs ();
9209 /* The HP SI millicode division and mod functions expect DI arguments.
9210 By turning them off completely we avoid using both libgcc and the
9211 non-standard millicode routines and use the HP DI millicode routines
9212 instead. */
9214 set_optab_libfunc (sdiv_optab, SImode, 0);
9215 set_optab_libfunc (udiv_optab, SImode, 0);
9216 set_optab_libfunc (smod_optab, SImode, 0);
9217 set_optab_libfunc (umod_optab, SImode, 0);
9219 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9220 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9221 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9222 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
9224 /* HP-UX libc has TF min/max/abs routines in it. */
9225 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
9226 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
9227 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
9229 /* ia64_expand_compare uses this. */
9230 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
9232 /* These should never be used. */
9233 set_optab_libfunc (eq_optab, TFmode, 0);
9234 set_optab_libfunc (ne_optab, TFmode, 0);
9235 set_optab_libfunc (gt_optab, TFmode, 0);
9236 set_optab_libfunc (ge_optab, TFmode, 0);
9237 set_optab_libfunc (lt_optab, TFmode, 0);
9238 set_optab_libfunc (le_optab, TFmode, 0);
9241 /* Rename the division and modulus functions in VMS. */
9243 static void
9244 ia64_vms_init_libfuncs (void)
9246 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9247 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9248 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9249 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9250 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9251 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9252 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9253 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9256 /* Rename the TFmode libfuncs available from soft-fp in glibc using
9257 the HPUX conventions. */
9259 static void
9260 ia64_sysv4_init_libfuncs (void)
9262 ia64_init_libfuncs ();
9264 /* These functions are not part of the HPUX TFmode interface. We
9265 use them instead of _U_Qfcmp, which doesn't work the way we
9266 expect. */
9267 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
9268 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
9269 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
9270 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
9271 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
9272 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
9274 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
9275 glibc doesn't have them. */
9278 /* For HPUX, it is illegal to have relocations in shared segments. */
9280 static int
9281 ia64_hpux_reloc_rw_mask (void)
9283 return 3;
9286 /* For others, relax this so that relocations to local data go in
9287 read-only segments, but we still cannot allow global relocations
9288 in read-only segments. */
9290 static int
9291 ia64_reloc_rw_mask (void)
9293 return flag_pic ? 3 : 2;
9296 /* Return the section to use for X. The only special thing we do here
9297 is to honor small data. */
9299 static section *
9300 ia64_select_rtx_section (enum machine_mode mode, rtx x,
9301 unsigned HOST_WIDE_INT align)
9303 if (GET_MODE_SIZE (mode) > 0
9304 && GET_MODE_SIZE (mode) <= ia64_section_threshold
9305 && !TARGET_NO_SDATA)
9306 return sdata_section;
9307 else
9308 return default_elf_select_rtx_section (mode, x, align);
9311 static unsigned int
9312 ia64_section_type_flags (tree decl, const char *name, int reloc)
9314 unsigned int flags = 0;
9316 if (strcmp (name, ".sdata") == 0
9317 || strncmp (name, ".sdata.", 7) == 0
9318 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9319 || strncmp (name, ".sdata2.", 8) == 0
9320 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
9321 || strcmp (name, ".sbss") == 0
9322 || strncmp (name, ".sbss.", 6) == 0
9323 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9324 flags = SECTION_SMALL;
9326 flags |= default_section_type_flags (decl, name, reloc);
9327 return flags;
9330 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
9331 structure type and that the address of that type should be passed
9332 in out0, rather than in r8. */
9334 static bool
9335 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
9337 tree ret_type = TREE_TYPE (fntype);
9339 /* The Itanium C++ ABI requires that out0, rather than r8, be used
9340 as the structure return address parameter, if the return value
9341 type has a non-trivial copy constructor or destructor. It is not
9342 clear if this same convention should be used for other
9343 programming languages. Until G++ 3.4, we incorrectly used r8 for
9344 these return values. */
9345 return (abi_version_at_least (2)
9346 && ret_type
9347 && TYPE_MODE (ret_type) == BLKmode
9348 && TREE_ADDRESSABLE (ret_type)
9349 && strcmp (lang_hooks.name, "GNU C++") == 0);
9352 /* Output the assembler code for a thunk function. THUNK_DECL is the
9353 declaration for the thunk function itself, FUNCTION is the decl for
9354 the target function. DELTA is an immediate constant offset to be
9355 added to THIS. If VCALL_OFFSET is nonzero, the word at
9356 *(*this + vcall_offset) should be added to THIS. */
9358 static void
9359 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9360 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9361 tree function)
9363 rtx this, insn, funexp;
9364 unsigned int this_parmno;
9365 unsigned int this_regno;
9366 rtx delta_rtx;
9368 reload_completed = 1;
9369 epilogue_completed = 1;
9370 no_new_pseudos = 1;
9372 /* Set things up as ia64_expand_prologue might. */
9373 last_scratch_gr_reg = 15;
9375 memset (&current_frame_info, 0, sizeof (current_frame_info));
9376 current_frame_info.spill_cfa_off = -16;
9377 current_frame_info.n_input_regs = 1;
9378 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
9380 /* Mark the end of the (empty) prologue. */
9381 emit_note (NOTE_INSN_PROLOGUE_END);
9383 /* Figure out whether "this" will be the first parameter (the
9384 typical case) or the second parameter (as happens when the
9385 virtual function returns certain class objects). */
9386 this_parmno
9387 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
9388 ? 1 : 0);
9389 this_regno = IN_REG (this_parmno);
9390 if (!TARGET_REG_NAMES)
9391 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
9393 this = gen_rtx_REG (Pmode, this_regno);
9395 /* Apply the constant offset, if required. */
9396 delta_rtx = GEN_INT (delta);
9397 if (TARGET_ILP32)
9399 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
9400 REG_POINTER (tmp) = 1;
9401 if (delta && satisfies_constraint_I (delta_rtx))
9403 emit_insn (gen_ptr_extend_plus_imm (this, tmp, delta_rtx));
9404 delta = 0;
9406 else
9407 emit_insn (gen_ptr_extend (this, tmp));
9409 if (delta)
9411 if (!satisfies_constraint_I (delta_rtx))
9413 rtx tmp = gen_rtx_REG (Pmode, 2);
9414 emit_move_insn (tmp, delta_rtx);
9415 delta_rtx = tmp;
9417 emit_insn (gen_adddi3 (this, this, delta_rtx));
9420 /* Apply the offset from the vtable, if required. */
9421 if (vcall_offset)
9423 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9424 rtx tmp = gen_rtx_REG (Pmode, 2);
9426 if (TARGET_ILP32)
9428 rtx t = gen_rtx_REG (ptr_mode, 2);
9429 REG_POINTER (t) = 1;
9430 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
9431 if (satisfies_constraint_I (vcall_offset_rtx))
9433 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
9434 vcall_offset = 0;
9436 else
9437 emit_insn (gen_ptr_extend (tmp, t));
9439 else
9440 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
9442 if (vcall_offset)
9444 if (!satisfies_constraint_J (vcall_offset_rtx))
9446 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9447 emit_move_insn (tmp2, vcall_offset_rtx);
9448 vcall_offset_rtx = tmp2;
9450 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9453 if (TARGET_ILP32)
9454 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
9455 else
9456 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9458 emit_insn (gen_adddi3 (this, this, tmp));
  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_alloc ();
  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
ia64_struct_value_rtx (tree fntype,
		       int incoming ATTRIBUTE_UNUSED)
{
  if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}
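/* In other words (a summary, not target documentation): aggregates
   returned in memory normally have their address passed in r8, the
   struct-value register; returning NULL_RTX instead tells the middle
   end to pass that address as a hidden first argument, which is what
   the convention checked by ia64_struct_retval_addr_is_first_parm_p
   expects.  */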
static bool
ia64_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode:
      return true;

    case SFmode:
    case DFmode:
    case XFmode:
    case RFmode:
      return true;

    case TFmode:
      return TARGET_HPUX;

    default:
      return false;
    }
}
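/* Added note: TFmode (the 128-bit floating type) is only reported as
   supported on HP-UX, where "long double" is 128 bits wide; RFmode is
   the register-format __fpreg type that the fragments further below
   treat specially.  */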
static bool
ia64_vector_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V8QImode:
    case V4HImode:
    case V2SImode:
      return true;

    case V2SFmode:
      return true;

    default:
      return false;
    }
}
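/* The modes above are the 64-bit vector modes used by the parallel
   multimedia patterns.  In GNU C they can be reached with, for example
   (an illustrative sketch):

     typedef short v4hi __attribute__ ((vector_size (8)));
     typedef float v2sf __attribute__ ((vector_size (8)));

   which give values of V4HImode and V2SFmode respectively.  */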
/* Implement the FUNCTION_PROFILER macro.  */

void
ia64_output_function_profiler (FILE *file, int labelno)
{
  bool indirect_call;

  /* If the function needs a static chain and the static chain
     register is r15, we use an indirect call so as to bypass
     the PLT stub in case the executable is dynamically linked,
     because the stub clobbers r15 as per 5.3.6 of the psABI.
     We don't need to do that in non-canonical PIC mode.  */

  if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
    {
      gcc_assert (STATIC_CHAIN_REGNUM == 15);
      indirect_call = true;
    }
  else
    indirect_call = false;
  if (TARGET_GNU_AS)
    fputs ("\t.prologue 4, r40\n", file);
  else
    fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
  fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);

  if (NO_PROFILE_COUNTERS)
    fputs ("\tmov out3 = r0\n", file);
  else
    {
      char buf[20];
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);

      if (TARGET_AUTO_PIC)
	fputs ("\tmovl out3 = @gprel(", file);
      else
	fputs ("\taddl out3 = @ltoff(", file);
      assemble_name (file, buf);
      if (TARGET_AUTO_PIC)
	fputs (")\n", file);
      else
	fputs ("), r1\n", file);
    }

  if (indirect_call)
    fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
  fputs ("\t;;\n", file);

  fputs ("\t.save rp, r42\n", file);
  fputs ("\tmov out2 = b0\n", file);
  if (indirect_call)
    fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
  fputs ("\t.body\n", file);
  fputs ("\tmov out1 = r1\n", file);
  if (indirect_call)
    {
      fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
      fputs ("\tmov b6 = r16\n", file);
      fputs ("\tld8 r1 = [r14]\n", file);
      fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
    }
  else
    fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
}
static GTY(()) rtx mcount_func_rtx;
static rtx
gen_mcount_func_rtx (void)
{
  if (!mcount_func_rtx)
    mcount_func_rtx = init_one_libfunc ("_mcount");
  return mcount_func_rtx;
}
void
ia64_profile_hook (int labelno)
{
  rtx label, ip;

  if (NO_PROFILE_COUNTERS)
    label = const0_rtx;
  else
    {
      char buf[30];
      const char *label_name;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
      label = gen_rtx_SYMBOL_REF (Pmode, label_name);
      SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
    }
  ip = gen_reg_rtx (Pmode);
  emit_insn (gen_ip_value (ip));
  emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
		     VOIDmode, 3,
		     gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
		     ip, Pmode,
		     label, Pmode);
}
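/* The libcall built above hands _mcount three arguments: the caller's
   return address (branch register b0), the current IP, and the address
   of the LP counter label (or 0 when NO_PROFILE_COUNTERS).  Roughly, in
   C-like pseudo-code:

     _mcount (return_address_in_b0, current_ip, &LPn);
*/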
/* Return the mangling of TYPE if it is an extended fundamental type.  */

static const char *
ia64_mangle_fundamental_type (tree type)
{
  /* On HP-UX, "long double" is TFmode and is mangled as "e", so
     __float128 needs no special treatment there.  Elsewhere, "long
     double" is the 80-bit XFmode type, so TFmode (__float128) gets the
     distinct mangling "g".  */
  if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
    return "g";
  /* On HP-UX, "e" is not available as a mangling of __float80 so use
     an extended mangling.  Elsewhere, "e" is available since long
     double is 80 bits.  */
  if (TYPE_MODE (type) == XFmode)
    return TARGET_HPUX ? "u9__float80" : "e";
  if (TYPE_MODE (type) == RFmode)
    return "u7__fpreg";
  return NULL;
}
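/* Illustrative manglings implied by the returns above, using the
   Itanium C++ ABI scheme and an assumed declaration "void f (T)":

     void f (__float128);  ->  _Z1fg            (non-HP-UX)
     void f (__float80);   ->  _Z1fu9__float80  (HP-UX)
                               _Z1fe            (elsewhere)
     void f (__fpreg);     ->  _Z1fu7__fpreg
 */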
/* Return the diagnostic message string if conversion from FROMTYPE to
   TOTYPE is not allowed, NULL otherwise.  */
static const char *
ia64_invalid_conversion (tree fromtype, tree totype)
{
  /* Reject nontrivial conversion to or from __fpreg.  */
  if (TYPE_MODE (fromtype) == RFmode
      && TYPE_MODE (totype) != RFmode
      && TYPE_MODE (totype) != VOIDmode)
    return N_("invalid conversion from %<__fpreg%>");
  if (TYPE_MODE (totype) == RFmode
      && TYPE_MODE (fromtype) != RFmode)
    return N_("invalid conversion to %<__fpreg%>");
  return NULL;
}
/* Return the diagnostic message string if the unary operation OP is
   not permitted on TYPE, NULL otherwise.  */
static const char *
ia64_invalid_unary_op (int op, tree type)
{
  /* Reject operations on __fpreg other than unary + or &.  */
  if (TYPE_MODE (type) == RFmode
      && op != CONVERT_EXPR
      && op != ADDR_EXPR)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */
static const char *
ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
{
  /* Reject operations on __fpreg.  */
  if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}
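/* A sketch of the diagnostics the three hooks above produce:

     __fpreg r, s;
     double d = r;   ->  error: invalid conversion from '__fpreg'
     r = 1.0;        ->  error: invalid conversion to '__fpreg'
     -r;  r + s;     ->  error: invalid operation on '__fpreg'
     +r;  &r;            accepted (only unary '+' and '&' are allowed)
 */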
/* Implement overriding of the optimization options.  */
void
ia64_optimization_options (int level ATTRIBUTE_UNUSED,
			   int size ATTRIBUTE_UNUSED)
{
  /* Let the scheduler form additional regions.  */
  set_param_value ("max-sched-extend-regions-iters", 2);

  /* Set the default values for cache-related parameters.  */
  set_param_value ("simultaneous-prefetches", 6);
  set_param_value ("l1-cache-line-size", 32);
}
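/* These defaults correspond to the --param options of the same names,
   e.g. --param simultaneous-prefetches=6 and
   --param l1-cache-line-size=32.  */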
/* HP-UX version_id attribute.
   For object foo, if the version_id is set to 1234, put out an alias
   of '.alias foo "foo{1234}"'.  We can't use "foo{1234}" in anything
   other than an alias statement because it is an illegal symbol name.  */
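/* A usage sketch (illustrative, HP-UX only):

     extern int foo (void) __attribute__ ((version_id ("1234")));

   The back end then emits the '.alias foo "foo{1234}"' directive
   described above instead of using the braced name directly.  */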
static tree
ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
				  tree name ATTRIBUTE_UNUSED,
				  tree args,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  tree arg = TREE_VALUE (args);

  if (TREE_CODE (arg) != STRING_CST)
    {
      error ("version attribute is not a string");
      *no_add_attrs = true;
      return NULL_TREE;
    }
  return NULL_TREE;
}

#include "gt-ia64.h"