/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
   David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "intl.h"
#include "debug.h"
#include "params.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;

/* Define the information needed to generate branch and scc insns.  This is
   stored from the compare operation.  */
struct rtx_def * ia64_compare_op0;
struct rtx_def * ia64_compare_op1;
68 /* Register names for ia64_expand_prologue. */
69 static const char * const ia64_reg_numbers[96] =
70 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
71 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
72 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
73 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
74 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
75 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
76 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
77 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
78 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
79 "r104","r105","r106","r107","r108","r109","r110","r111",
80 "r112","r113","r114","r115","r116","r117","r118","r119",
81 "r120","r121","r122","r123","r124","r125","r126","r127"};
83 /* ??? These strings could be shared with REGISTER_NAMES. */
84 static const char * const ia64_input_reg_names[8] =
85 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
87 /* ??? These strings could be shared with REGISTER_NAMES. */
88 static const char * const ia64_local_reg_names[80] =
89 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
90 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
91 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
92 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
93 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
94 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
95 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
96 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
97 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
98 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
100 /* ??? These strings could be shared with REGISTER_NAMES. */
101 static const char * const ia64_output_reg_names[8] =
102 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
104 /* Which cpu are we scheduling for. */
105 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
107 /* Determines whether we run our final scheduling pass or not. We always
108 avoid the normal second scheduling pass. */
109 static int ia64_flag_schedule_insns2;
111 /* Determines whether we run variable tracking in machine dependent
112 reorganization. */
113 static int ia64_flag_var_tracking;
/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */

unsigned int ia64_section_threshold;
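
/* A note (not from the original source): a global whose size is at or
   below this threshold is placed in .sdata/.sbss and can then be reached
   with a short gp-relative addressing sequence instead of a full symbolic
   address load.  */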
120 /* The following variable is used by the DFA insn scheduler. The value is
121 TRUE if we do insn bundling instead of insn scheduling. */
122 int bundling_p = 0;
/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;	  /* size of the stack frame, not including
				     the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;	  /* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;	  /* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size; /* size of spill area for others.  */
  HARD_REG_SET mask;		  /* mask of saved registers.  */
  unsigned int gr_used_mask;	  /* mask of registers in use as gr spill
				     registers or long-term scratches.  */
  int n_spilled;		  /* number of spilled registers.  */
  int reg_fp;			  /* register for fp.  */
  int reg_save_b0;		  /* save register for b0.  */
  int reg_save_pr;		  /* save register for prs.  */
  int reg_save_ar_pfs;		  /* save register for ar.pfs.  */
  int reg_save_ar_unat;		  /* save register for ar.unat.  */
  int reg_save_ar_lc;		  /* save register for ar.lc.  */
  int reg_save_gp;		  /* save register for gp.  */
  int n_input_regs;		  /* number of input registers used.  */
  int n_local_regs;		  /* number of local registers used.  */
  int n_output_regs;		  /* number of output registers used.  */
  int n_rotate_regs;		  /* number of rotating registers used.  */

  char need_regstk;		  /* true if a .regstk directive needed.  */
  char initialized;		  /* true if the data is finalized.  */
};

/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;
157 static int ia64_first_cycle_multipass_dfa_lookahead (void);
158 static void ia64_dependencies_evaluation_hook (rtx, rtx);
159 static void ia64_init_dfa_pre_cycle_insn (void);
160 static rtx ia64_dfa_pre_cycle_insn (void);
161 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
162 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (rtx);
163 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
164 static void ia64_h_i_d_extended (void);
165 static int ia64_mode_to_int (enum machine_mode);
166 static void ia64_set_sched_flags (spec_info_t);
167 static int ia64_speculate_insn (rtx, ds_t, rtx *);
168 static rtx ia64_gen_spec_insn (rtx, ds_t, int, bool, bool);
169 static bool ia64_needs_block_p (rtx);
170 static rtx ia64_gen_check (rtx, rtx, bool);
171 static int ia64_spec_check_p (rtx);
172 static int ia64_spec_check_src_p (rtx);
173 static rtx gen_tls_get_addr (void);
174 static rtx gen_thread_pointer (void);
175 static int find_gr_spill (int);
176 static int next_scratch_gr_reg (void);
177 static void mark_reg_gr_used_mask (rtx, void *);
178 static void ia64_compute_frame_size (HOST_WIDE_INT);
179 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
180 static void finish_spill_pointers (void);
181 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
182 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
183 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
184 static rtx gen_movdi_x (rtx, rtx, rtx);
185 static rtx gen_fr_spill_x (rtx, rtx, rtx);
186 static rtx gen_fr_restore_x (rtx, rtx, rtx);
188 static enum machine_mode hfa_element_mode (tree, bool);
189 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
190 tree, int *, int);
191 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
192 tree, bool);
193 static bool ia64_function_ok_for_sibcall (tree, tree);
194 static bool ia64_return_in_memory (tree, tree);
195 static bool ia64_rtx_costs (rtx, int, int, int *);
196 static void fix_range (const char *);
197 static bool ia64_handle_option (size_t, const char *, int);
198 static struct machine_function * ia64_init_machine_status (void);
199 static void emit_insn_group_barriers (FILE *);
200 static void emit_all_insn_group_barriers (FILE *);
201 static void final_emit_insn_group_barriers (FILE *);
202 static void emit_predicate_relation_info (void);
203 static void ia64_reorg (void);
204 static bool ia64_in_small_data_p (tree);
205 static void process_epilogue (FILE *, rtx, bool, bool);
206 static int process_set (FILE *, rtx, rtx, bool, bool);
208 static bool ia64_assemble_integer (rtx, unsigned int, int);
209 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
210 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
211 static void ia64_output_function_end_prologue (FILE *);
213 static int ia64_issue_rate (void);
214 static int ia64_adjust_cost (rtx, rtx, rtx, int);
215 static void ia64_sched_init (FILE *, int, int);
216 static void ia64_sched_init_global (FILE *, int, int);
217 static void ia64_sched_finish_global (FILE *, int);
218 static void ia64_sched_finish (FILE *, int);
219 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
220 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
221 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
222 static int ia64_variable_issue (FILE *, int, rtx, int);
224 static struct bundle_state *get_free_bundle_state (void);
225 static void free_bundle_state (struct bundle_state *);
226 static void initiate_bundle_states (void);
227 static void finish_bundle_states (void);
228 static unsigned bundle_state_hash (const void *);
229 static int bundle_state_eq_p (const void *, const void *);
230 static int insert_bundle_state (struct bundle_state *);
231 static void initiate_bundle_state_table (void);
232 static void finish_bundle_state_table (void);
233 static int try_issue_nops (struct bundle_state *, int);
234 static int try_issue_insn (struct bundle_state *, rtx);
235 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
236 static int get_max_pos (state_t);
237 static int get_template (state_t, int);
239 static rtx get_next_important_insn (rtx, rtx);
240 static void bundling (FILE *, int, rtx, rtx);
242 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
243 HOST_WIDE_INT, tree);
244 static void ia64_file_start (void);
245 static void ia64_globalize_decl_name (FILE *, tree);
247 static section *ia64_select_rtx_section (enum machine_mode, rtx,
248 unsigned HOST_WIDE_INT);
249 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
250 ATTRIBUTE_UNUSED;
251 static section *ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
252 ATTRIBUTE_UNUSED;
253 static void ia64_rwreloc_unique_section (tree, int)
254 ATTRIBUTE_UNUSED;
255 static section *ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
256 unsigned HOST_WIDE_INT)
257 ATTRIBUTE_UNUSED;
258 static unsigned int ia64_section_type_flags (tree, const char *, int);
259 static void ia64_init_libfuncs (void)
260 ATTRIBUTE_UNUSED;
261 static void ia64_hpux_init_libfuncs (void)
262 ATTRIBUTE_UNUSED;
263 static void ia64_sysv4_init_libfuncs (void)
264 ATTRIBUTE_UNUSED;
265 static void ia64_vms_init_libfuncs (void)
266 ATTRIBUTE_UNUSED;
268 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
269 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
270 static void ia64_encode_section_info (tree, rtx, int);
271 static rtx ia64_struct_value_rtx (tree, int);
272 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
273 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
274 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
275 static bool ia64_cannot_force_const_mem (rtx);
276 static const char *ia64_mangle_fundamental_type (tree);
277 static const char *ia64_invalid_conversion (tree, tree);
278 static const char *ia64_invalid_unary_op (int, tree);
279 static const char *ia64_invalid_binary_op (int, tree, tree);
281 /* Table of valid machine attributes. */
282 static const struct attribute_spec ia64_attribute_table[] =
284 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
285 { "syscall_linkage", 0, 0, false, true, true, NULL },
286 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
287 { "version_id", 1, 1, true, false, false,
288 ia64_handle_version_id_attribute },
289 { NULL, 0, 0, false, false, false, NULL }
292 /* Initialize the GCC target structure. */
293 #undef TARGET_ATTRIBUTE_TABLE
294 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
296 #undef TARGET_INIT_BUILTINS
297 #define TARGET_INIT_BUILTINS ia64_init_builtins
299 #undef TARGET_EXPAND_BUILTIN
300 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
302 #undef TARGET_ASM_BYTE_OP
303 #define TARGET_ASM_BYTE_OP "\tdata1\t"
304 #undef TARGET_ASM_ALIGNED_HI_OP
305 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
306 #undef TARGET_ASM_ALIGNED_SI_OP
307 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
308 #undef TARGET_ASM_ALIGNED_DI_OP
309 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
310 #undef TARGET_ASM_UNALIGNED_HI_OP
311 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
312 #undef TARGET_ASM_UNALIGNED_SI_OP
313 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
314 #undef TARGET_ASM_UNALIGNED_DI_OP
315 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
316 #undef TARGET_ASM_INTEGER
317 #define TARGET_ASM_INTEGER ia64_assemble_integer
319 #undef TARGET_ASM_FUNCTION_PROLOGUE
320 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
321 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
322 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
323 #undef TARGET_ASM_FUNCTION_EPILOGUE
324 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
326 #undef TARGET_IN_SMALL_DATA_P
327 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
329 #undef TARGET_SCHED_ADJUST_COST
330 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
331 #undef TARGET_SCHED_ISSUE_RATE
332 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
333 #undef TARGET_SCHED_VARIABLE_ISSUE
334 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
335 #undef TARGET_SCHED_INIT
336 #define TARGET_SCHED_INIT ia64_sched_init
337 #undef TARGET_SCHED_FINISH
338 #define TARGET_SCHED_FINISH ia64_sched_finish
339 #undef TARGET_SCHED_INIT_GLOBAL
340 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
341 #undef TARGET_SCHED_FINISH_GLOBAL
342 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
343 #undef TARGET_SCHED_REORDER
344 #define TARGET_SCHED_REORDER ia64_sched_reorder
345 #undef TARGET_SCHED_REORDER2
346 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
348 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
349 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
351 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
352 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
354 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
355 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
356 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
357 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
359 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
360 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
361 ia64_first_cycle_multipass_dfa_lookahead_guard
363 #undef TARGET_SCHED_DFA_NEW_CYCLE
364 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
366 #undef TARGET_SCHED_H_I_D_EXTENDED
367 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
369 #undef TARGET_SCHED_SET_SCHED_FLAGS
370 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
372 #undef TARGET_SCHED_SPECULATE_INSN
373 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
375 #undef TARGET_SCHED_NEEDS_BLOCK_P
376 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
378 #undef TARGET_SCHED_GEN_CHECK
379 #define TARGET_SCHED_GEN_CHECK ia64_gen_check
381 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
382 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
383 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
385 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
386 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
387 #undef TARGET_ARG_PARTIAL_BYTES
388 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
390 #undef TARGET_ASM_OUTPUT_MI_THUNK
391 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
392 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
393 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
395 #undef TARGET_ASM_FILE_START
396 #define TARGET_ASM_FILE_START ia64_file_start
398 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
399 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
401 #undef TARGET_RTX_COSTS
402 #define TARGET_RTX_COSTS ia64_rtx_costs
403 #undef TARGET_ADDRESS_COST
404 #define TARGET_ADDRESS_COST hook_int_rtx_0
406 #undef TARGET_MACHINE_DEPENDENT_REORG
407 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
409 #undef TARGET_ENCODE_SECTION_INFO
410 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
412 #undef TARGET_SECTION_TYPE_FLAGS
413 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
415 #ifdef HAVE_AS_TLS
416 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
417 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
418 #endif
420 /* ??? ABI doesn't allow us to define this. */
421 #if 0
422 #undef TARGET_PROMOTE_FUNCTION_ARGS
423 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
424 #endif
426 /* ??? ABI doesn't allow us to define this. */
427 #if 0
428 #undef TARGET_PROMOTE_FUNCTION_RETURN
429 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
430 #endif
432 /* ??? Investigate. */
433 #if 0
434 #undef TARGET_PROMOTE_PROTOTYPES
435 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
436 #endif
438 #undef TARGET_STRUCT_VALUE_RTX
439 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
440 #undef TARGET_RETURN_IN_MEMORY
441 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
442 #undef TARGET_SETUP_INCOMING_VARARGS
443 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
444 #undef TARGET_STRICT_ARGUMENT_NAMING
445 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
446 #undef TARGET_MUST_PASS_IN_STACK
447 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
449 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
450 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
452 #undef TARGET_UNWIND_EMIT
453 #define TARGET_UNWIND_EMIT process_for_unwind_directive
455 #undef TARGET_SCALAR_MODE_SUPPORTED_P
456 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
457 #undef TARGET_VECTOR_MODE_SUPPORTED_P
458 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
460 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
461 in an order different from the specified program order. */
462 #undef TARGET_RELAXED_ORDERING
463 #define TARGET_RELAXED_ORDERING true
465 #undef TARGET_DEFAULT_TARGET_FLAGS
466 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
467 #undef TARGET_HANDLE_OPTION
468 #define TARGET_HANDLE_OPTION ia64_handle_option
470 #undef TARGET_CANNOT_FORCE_CONST_MEM
471 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
473 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
474 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ia64_mangle_fundamental_type
476 #undef TARGET_INVALID_CONVERSION
477 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
478 #undef TARGET_INVALID_UNARY_OP
479 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
480 #undef TARGET_INVALID_BINARY_OP
481 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
483 struct gcc_target targetm = TARGET_INITIALIZER;
typedef enum
  {
    ADDR_AREA_NORMAL,	/* normal address area */
    ADDR_AREA_SMALL	/* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}

/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
	return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}
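
/* Example (a sketch, not from the original source; `small_counter' is a
   hypothetical name) of the "model" attribute that the machinery above
   and below implements.  Marking a file-scope variable this way flags its
   symbol as living in the small address area, so it can be addressed with
   "addl"; local variables and functions are rejected by the handler.  */
#if 0
int small_counter __attribute__ ((model ("small")));
#endif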
525 static tree
526 ia64_handle_model_attribute (tree *node, tree name, tree args,
527 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
529 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
530 ia64_addr_area area;
531 tree arg, decl = *node;
533 init_idents ();
534 arg = TREE_VALUE (args);
535 if (arg == small_ident1 || arg == small_ident2)
537 addr_area = ADDR_AREA_SMALL;
539 else
541 warning (OPT_Wattributes, "invalid argument of %qs attribute",
542 IDENTIFIER_POINTER (name));
543 *no_add_attrs = true;
546 switch (TREE_CODE (decl))
548 case VAR_DECL:
549 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
550 == FUNCTION_DECL)
551 && !TREE_STATIC (decl))
553 error ("%Jan address area attribute cannot be specified for "
554 "local variables", decl);
555 *no_add_attrs = true;
557 area = ia64_get_addr_area (decl);
558 if (area != ADDR_AREA_NORMAL && addr_area != area)
560 error ("address area of %q+D conflicts with previous "
561 "declaration", decl);
562 *no_add_attrs = true;
564 break;
566 case FUNCTION_DECL:
567 error ("%Jaddress area attribute cannot be specified for functions",
568 decl);
569 *no_add_attrs = true;
570 break;
572 default:
573 warning (OPT_Wattributes, "%qs attribute ignored",
574 IDENTIFIER_POINTER (name));
575 *no_add_attrs = true;
576 break;
579 return NULL_TREE;
582 static void
583 ia64_encode_addr_area (tree decl, rtx symbol)
585 int flags;
587 flags = SYMBOL_REF_FLAGS (symbol);
588 switch (ia64_get_addr_area (decl))
590 case ADDR_AREA_NORMAL: break;
591 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
592 default: gcc_unreachable ();
594 SYMBOL_REF_FLAGS (symbol) = flags;
597 static void
598 ia64_encode_section_info (tree decl, rtx rtl, int first)
600 default_encode_section_info (decl, rtl, first);
602 /* Careful not to prod global register variables. */
603 if (TREE_CODE (decl) == VAR_DECL
604 && GET_CODE (DECL_RTL (decl)) == MEM
605 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
606 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
607 ia64_encode_addr_area (decl, XEXP (rtl, 0));
610 /* Implement CONST_OK_FOR_LETTER_P. */
612 bool
613 ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
615 switch (c)
617 case 'I':
618 return CONST_OK_FOR_I (value);
619 case 'J':
620 return CONST_OK_FOR_J (value);
621 case 'K':
622 return CONST_OK_FOR_K (value);
623 case 'L':
624 return CONST_OK_FOR_L (value);
625 case 'M':
626 return CONST_OK_FOR_M (value);
627 case 'N':
628 return CONST_OK_FOR_N (value);
629 case 'O':
630 return CONST_OK_FOR_O (value);
631 case 'P':
632 return CONST_OK_FOR_P (value);
633 default:
634 return false;
638 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
640 bool
641 ia64_const_double_ok_for_letter_p (rtx value, char c)
643 switch (c)
645 case 'G':
646 return CONST_DOUBLE_OK_FOR_G (value);
647 default:
648 return false;
652 /* Implement EXTRA_CONSTRAINT. */
654 bool
655 ia64_extra_constraint (rtx value, char c)
657 switch (c)
659 case 'Q':
660 /* Non-volatile memory for FP_REG loads/stores. */
661 return memory_operand(value, VOIDmode) && !MEM_VOLATILE_P (value);
663 case 'R':
664 /* 1..4 for shladd arguments. */
665 return (GET_CODE (value) == CONST_INT
666 && INTVAL (value) >= 1 && INTVAL (value) <= 4);
668 case 'S':
669 /* Non-post-inc memory for asms and other unsavory creatures. */
670 return (GET_CODE (value) == MEM
671 && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
672 && (reload_in_progress || memory_operand (value, VOIDmode)));
674 case 'T':
675 /* Symbol ref to small-address-area. */
676 return small_addr_symbolic_operand (value, VOIDmode);
678 case 'U':
679 /* Vector zero. */
680 return value == CONST0_RTX (GET_MODE (value));
682 case 'W':
683 /* An integer vector, such that conversion to an integer yields a
684 value appropriate for an integer 'J' constraint. */
685 if (GET_CODE (value) == CONST_VECTOR
686 && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
688 value = simplify_subreg (DImode, value, GET_MODE (value), 0);
689 return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
691 return false;
693 case 'Y':
694 /* A V2SF vector containing elements that satisfy 'G'. */
695 return
696 (GET_CODE (value) == CONST_VECTOR
697 && GET_MODE (value) == V2SFmode
698 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
699 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
701 default:
702 return false;
706 /* Return 1 if the operands of a move are ok. */
709 ia64_move_ok (rtx dst, rtx src)
711 /* If we're under init_recog_no_volatile, we'll not be able to use
712 memory_operand. So check the code directly and don't worry about
713 the validity of the underlying address, which should have been
714 checked elsewhere anyway. */
715 if (GET_CODE (dst) != MEM)
716 return 1;
717 if (GET_CODE (src) == MEM)
718 return 0;
719 if (register_operand (src, VOIDmode))
720 return 1;
  /* Otherwise, this must be a constant, and it must be either 0, 0.0, or 1.0.  */
723 if (INTEGRAL_MODE_P (GET_MODE (dst)))
724 return src == const0_rtx;
725 else
726 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
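
/* A note (not from the original source) on why only 0, 0.0 and 1.0 are
   allowed above: these are exactly the values the architecture keeps in
   fixed registers (r0 is always 0, f0 is 0.0 and f1 is 1.0), so they can
   be stored to memory without first loading a temporary register.  */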
729 /* Return 1 if the operands are ok for a floating point load pair. */
732 ia64_load_pair_ok (rtx dst, rtx src)
734 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
735 return 0;
736 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
737 return 0;
738 switch (GET_CODE (XEXP (src, 0)))
740 case REG:
741 case POST_INC:
742 break;
743 case POST_DEC:
744 return 0;
745 case POST_MODIFY:
747 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
749 if (GET_CODE (adjust) != CONST_INT
750 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
751 return 0;
753 break;
754 default:
755 abort ();
757 return 1;
761 addp4_optimize_ok (rtx op1, rtx op2)
763 return (basereg_operand (op1, GET_MODE(op1)) !=
764 basereg_operand (op2, GET_MODE(op2)));
/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
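
/* A worked example (not from the original source): for the mask/shift
   pair 0xff0 / 4, as in (x << 4) & 0xff0, the shift leaves 0xff and
   exact_log2 (0xff + 1) == 8, the dep.z field length.  A non-contiguous
   mask such as 0x50 gives exact_log2 (0x6) == -1, i.e. failure.  */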
783 /* Return the TLS model to use for ADDR. */
785 static enum tls_model
786 tls_symbolic_operand_type (rtx addr)
788 enum tls_model tls_kind = 0;
790 if (GET_CODE (addr) == CONST)
792 if (GET_CODE (XEXP (addr, 0)) == PLUS
793 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
794 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
796 else if (GET_CODE (addr) == SYMBOL_REF)
797 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
799 return tls_kind;
802 /* Return true if X is a constant that is valid for some immediate
803 field in an instruction. */
805 bool
806 ia64_legitimate_constant_p (rtx x)
808 switch (GET_CODE (x))
810 case CONST_INT:
811 case LABEL_REF:
812 return true;
814 case CONST_DOUBLE:
815 if (GET_MODE (x) == VOIDmode)
816 return true;
817 return CONST_DOUBLE_OK_FOR_G (x);
819 case CONST:
820 case SYMBOL_REF:
821 /* ??? Short term workaround for PR 28490. We must make the code here
822 match the code in ia64_expand_move and move_operand, even though they
823 are both technically wrong. */
824 if (tls_symbolic_operand_type (x) == 0)
826 HOST_WIDE_INT addend = 0;
827 rtx op = x;
829 if (GET_CODE (op) == CONST
830 && GET_CODE (XEXP (op, 0)) == PLUS
831 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
833 addend = INTVAL (XEXP (XEXP (op, 0), 1));
834 op = XEXP (XEXP (op, 0), 0);
837 if (any_offset_symbol_operand (op, GET_MODE (op))
838 || function_operand (op, GET_MODE (op)))
839 return true;
840 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
841 return (addend & 0x3fff) == 0;
842 return false;
844 return false;
846 case CONST_VECTOR:
848 enum machine_mode mode = GET_MODE (x);
850 if (mode == V2SFmode)
851 return ia64_extra_constraint (x, 'Y');
853 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
854 && GET_MODE_SIZE (mode) <= 8);
857 default:
858 return false;
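
/* For example (a note, not from the original source): with an
   aligned-offset symbol the check above accepts sym+0x4000 or sym+0x8000,
   whose addends have their low 14 bits clear, but rejects sym+0x2001,
   since (0x2001 & 0x3fff) != 0.  */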
862 /* Don't allow TLS addresses to get spilled to memory. */
864 static bool
865 ia64_cannot_force_const_mem (rtx x)
867 return tls_symbolic_operand_type (x) != 0;
870 /* Expand a symbolic constant load. */
872 bool
873 ia64_expand_load_address (rtx dest, rtx src)
875 gcc_assert (GET_CODE (dest) == REG);
877 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
878 having to pointer-extend the value afterward. Other forms of address
879 computation below are also more natural to compute as 64-bit quantities.
880 If we've been given an SImode destination register, change it. */
881 if (GET_MODE (dest) != Pmode)
882 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest), 0);
884 if (TARGET_NO_PIC)
885 return false;
886 if (small_addr_symbolic_operand (src, VOIDmode))
887 return false;
889 if (TARGET_AUTO_PIC)
890 emit_insn (gen_load_gprel64 (dest, src));
891 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
892 emit_insn (gen_load_fptr (dest, src));
893 else if (sdata_symbolic_operand (src, VOIDmode))
894 emit_insn (gen_load_gprel (dest, src));
895 else
897 HOST_WIDE_INT addend = 0;
898 rtx tmp;
900 /* We did split constant offsets in ia64_expand_move, and we did try
901 to keep them split in move_operand, but we also allowed reload to
902 rematerialize arbitrary constants rather than spill the value to
903 the stack and reload it. So we have to be prepared here to split
904 them apart again. */
905 if (GET_CODE (src) == CONST)
907 HOST_WIDE_INT hi, lo;
909 hi = INTVAL (XEXP (XEXP (src, 0), 1));
910 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
911 hi = hi - lo;
913 if (lo != 0)
915 addend = lo;
916 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
920 tmp = gen_rtx_HIGH (Pmode, src);
921 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
922 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
924 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
925 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
927 if (addend)
929 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
930 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
934 return true;
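
/* A worked example (not from the original source) of the split above:
   for a CONST offset of 0x12345, lo = ((0x2345 ^ 0x2000) - 0x2000)
   == -0x1cbb, the sign-extended low 14 bits, and hi == 0x12345 - lo
   == 0x14000, whose low 14 bits are clear; the hi part stays attached
   to the symbol and the lo part is added back with a separate add.  */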
937 static GTY(()) rtx gen_tls_tga;
938 static rtx
939 gen_tls_get_addr (void)
941 if (!gen_tls_tga)
942 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
943 return gen_tls_tga;
946 static GTY(()) rtx thread_pointer_rtx;
947 static rtx
948 gen_thread_pointer (void)
950 if (!thread_pointer_rtx)
951 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
952 return thread_pointer_rtx;
955 static rtx
956 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
957 rtx orig_op1, HOST_WIDE_INT addend)
959 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
960 rtx orig_op0 = op0;
961 HOST_WIDE_INT addend_lo, addend_hi;
963 switch (tls_kind)
965 case TLS_MODEL_GLOBAL_DYNAMIC:
966 start_sequence ();
968 tga_op1 = gen_reg_rtx (Pmode);
969 emit_insn (gen_load_dtpmod (tga_op1, op1));
971 tga_op2 = gen_reg_rtx (Pmode);
972 emit_insn (gen_load_dtprel (tga_op2, op1));
974 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
975 LCT_CONST, Pmode, 2, tga_op1,
976 Pmode, tga_op2, Pmode);
978 insns = get_insns ();
979 end_sequence ();
981 if (GET_MODE (op0) != Pmode)
982 op0 = tga_ret;
983 emit_libcall_block (insns, op0, tga_ret, op1);
984 break;
986 case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic.
	 If the call to __tls_get_addr is used only by a single symbol,
	 then we should (somehow) move the dtprel to the second arg
	 to avoid the extra add.  */
991 start_sequence ();
993 tga_op1 = gen_reg_rtx (Pmode);
994 emit_insn (gen_load_dtpmod (tga_op1, op1));
996 tga_op2 = const0_rtx;
998 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
999 LCT_CONST, Pmode, 2, tga_op1,
1000 Pmode, tga_op2, Pmode);
1002 insns = get_insns ();
1003 end_sequence ();
1005 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1006 UNSPEC_LD_BASE);
1007 tmp = gen_reg_rtx (Pmode);
1008 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1010 if (!register_operand (op0, Pmode))
1011 op0 = gen_reg_rtx (Pmode);
1012 if (TARGET_TLS64)
1014 emit_insn (gen_load_dtprel (op0, op1));
1015 emit_insn (gen_adddi3 (op0, tmp, op0));
1017 else
1018 emit_insn (gen_add_dtprel (op0, op1, tmp));
1019 break;
1021 case TLS_MODEL_INITIAL_EXEC:
1022 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1023 addend_hi = addend - addend_lo;
1025 op1 = plus_constant (op1, addend_hi);
1026 addend = addend_lo;
1028 tmp = gen_reg_rtx (Pmode);
1029 emit_insn (gen_load_tprel (tmp, op1));
1031 if (!register_operand (op0, Pmode))
1032 op0 = gen_reg_rtx (Pmode);
1033 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1034 break;
1036 case TLS_MODEL_LOCAL_EXEC:
1037 if (!register_operand (op0, Pmode))
1038 op0 = gen_reg_rtx (Pmode);
1040 op1 = orig_op1;
1041 addend = 0;
1042 if (TARGET_TLS64)
1044 emit_insn (gen_load_tprel (op0, op1));
1045 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1047 else
1048 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1049 break;
1051 default:
1052 gcc_unreachable ();
1055 if (addend)
1056 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1057 orig_op0, 1, OPTAB_DIRECT);
1058 if (orig_op0 == op0)
1059 return NULL_RTX;
1060 if (GET_MODE (orig_op0) == Pmode)
1061 return op0;
1062 return gen_lowpart (GET_MODE (orig_op0), op0);
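
/* Illustration (a sketch, not from the original source): the four cases
   above correspond to the four TLS access models GCC can pick for
   __thread variables; which one is chosen for a given declaration
   depends on -fpic/-fPIC, the symbol's binding, and -ftls-model.  */
#if 0
__thread int tls_counter;	/* thread-local storage at the C level */
static __thread int tls_private;
#endif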
1066 ia64_expand_move (rtx op0, rtx op1)
1068 enum machine_mode mode = GET_MODE (op0);
1070 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1071 op1 = force_reg (mode, op1);
1073 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1075 HOST_WIDE_INT addend = 0;
1076 enum tls_model tls_kind;
1077 rtx sym = op1;
1079 if (GET_CODE (op1) == CONST
1080 && GET_CODE (XEXP (op1, 0)) == PLUS
1081 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1083 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1084 sym = XEXP (XEXP (op1, 0), 0);
1087 tls_kind = tls_symbolic_operand_type (sym);
1088 if (tls_kind)
1089 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1091 if (any_offset_symbol_operand (sym, mode))
1092 addend = 0;
1093 else if (aligned_offset_symbol_operand (sym, mode))
1095 HOST_WIDE_INT addend_lo, addend_hi;
1097 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1098 addend_hi = addend - addend_lo;
1100 if (addend_lo != 0)
1102 op1 = plus_constant (sym, addend_hi);
1103 addend = addend_lo;
1105 else
1106 addend = 0;
1108 else
1109 op1 = sym;
1111 if (reload_completed)
1113 /* We really should have taken care of this offset earlier. */
1114 gcc_assert (addend == 0);
1115 if (ia64_expand_load_address (op0, op1))
1116 return NULL_RTX;
1119 if (addend)
1121 rtx subtarget = no_new_pseudos ? op0 : gen_reg_rtx (mode);
1123 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1125 op1 = expand_simple_binop (mode, PLUS, subtarget,
1126 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1127 if (op0 == op1)
1128 return NULL_RTX;
1132 return op1;
1135 /* Split a move from OP1 to OP0 conditional on COND. */
1137 void
1138 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1140 rtx insn, first = get_last_insn ();
1142 emit_move_insn (op0, op1);
1144 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1145 if (INSN_P (insn))
1146 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1147 PATTERN (insn));
1150 /* Split a post-reload TImode or TFmode reference into two DImode
1151 components. This is made extra difficult by the fact that we do
1152 not get any scratch registers to work with, because reload cannot
1153 be prevented from giving us a scratch that overlaps the register
1154 pair involved. So instead, when addressing memory, we tweak the
1155 pointer register up and back down with POST_INCs. Or up and not
1156 back down when we can get away with it.
1158 REVERSED is true when the loads must be done in reversed order
1159 (high word first) for correctness. DEAD is true when the pointer
1160 dies with the second insn we generate and therefore the second
1161 address must not carry a postmodify.
1163 May return an insn which is to be emitted after the moves. */
1165 static rtx
1166 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1168 rtx fixup = 0;
1170 switch (GET_CODE (in))
1172 case REG:
1173 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1174 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1175 break;
1177 case CONST_INT:
1178 case CONST_DOUBLE:
1179 /* Cannot occur reversed. */
1180 gcc_assert (!reversed);
1182 if (GET_MODE (in) != TFmode)
1183 split_double (in, &out[0], &out[1]);
1184 else
1185 /* split_double does not understand how to split a TFmode
1186 quantity into a pair of DImode constants. */
1188 REAL_VALUE_TYPE r;
1189 unsigned HOST_WIDE_INT p[2];
1190 long l[4]; /* TFmode is 128 bits */
1192 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1193 real_to_target (l, &r, TFmode);
1195 if (FLOAT_WORDS_BIG_ENDIAN)
1197 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1198 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1200 else
1202 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1203 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1205 out[0] = GEN_INT (p[0]);
1206 out[1] = GEN_INT (p[1]);
1208 break;
1210 case MEM:
1212 rtx base = XEXP (in, 0);
1213 rtx offset;
1215 switch (GET_CODE (base))
1217 case REG:
1218 if (!reversed)
1220 out[0] = adjust_automodify_address
1221 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1222 out[1] = adjust_automodify_address
1223 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1225 else
1227 /* Reversal requires a pre-increment, which can only
1228 be done as a separate insn. */
1229 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1230 out[0] = adjust_automodify_address
1231 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1232 out[1] = adjust_address (in, DImode, 0);
1234 break;
1236 case POST_INC:
1237 gcc_assert (!reversed && !dead);
1239 /* Just do the increment in two steps. */
1240 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1241 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1242 break;
1244 case POST_DEC:
1245 gcc_assert (!reversed && !dead);
1247 /* Add 8, subtract 24. */
1248 base = XEXP (base, 0);
1249 out[0] = adjust_automodify_address
1250 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1251 out[1] = adjust_automodify_address
1252 (in, DImode,
1253 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1255 break;
1257 case POST_MODIFY:
1258 gcc_assert (!reversed && !dead);
1260 /* Extract and adjust the modification. This case is
1261 trickier than the others, because we might have an
1262 index register, or we might have a combined offset that
1263 doesn't fit a signed 9-bit displacement field. We can
1264 assume the incoming expression is already legitimate. */
1265 offset = XEXP (base, 1);
1266 base = XEXP (base, 0);
1268 out[0] = adjust_automodify_address
1269 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1271 if (GET_CODE (XEXP (offset, 1)) == REG)
1273 /* Can't adjust the postmodify to match. Emit the
1274 original, then a separate addition insn. */
1275 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1276 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1278 else
1280 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1281 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1283 /* Again the postmodify cannot be made to match,
1284 but in this case it's more efficient to get rid
1285 of the postmodify entirely and fix up with an
1286 add insn. */
1287 out[1] = adjust_automodify_address (in, DImode, base, 8);
1288 fixup = gen_adddi3
1289 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1291 else
1293 /* Combined offset still fits in the displacement field.
1294 (We cannot overflow it at the high end.) */
1295 out[1] = adjust_automodify_address
1296 (in, DImode, gen_rtx_POST_MODIFY
1297 (Pmode, base, gen_rtx_PLUS
1298 (Pmode, base,
1299 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1303 break;
1305 default:
1306 gcc_unreachable ();
1308 break;
1311 default:
1312 gcc_unreachable ();
1315 return fixup;
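
/* Illustration (a sketch, not from the original source) of the REG case
   above for a TImode load from (mem (reg r)), not reversed, pointer not
   dead: the two DImode pieces are read as
       out[0] = [r], r post-incremented by 8
       out[1] = [r], r post-decremented by 8
   so that r ends up unchanged; when the pointer dies with the second
   access, the post-decrement is simply omitted.  */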
1318 /* Split a TImode or TFmode move instruction after reload.
1319 This is used by *movtf_internal and *movti_internal. */
1320 void
1321 ia64_split_tmode_move (rtx operands[])
1323 rtx in[2], out[2], insn;
1324 rtx fixup[2];
1325 bool dead = false;
1326 bool reversed = false;
1328 /* It is possible for reload to decide to overwrite a pointer with
1329 the value it points to. In that case we have to do the loads in
1330 the appropriate order so that the pointer is not destroyed too
1331 early. Also we must not generate a postmodify for that second
1332 load, or rws_access_regno will die. */
1333 if (GET_CODE (operands[1]) == MEM
1334 && reg_overlap_mentioned_p (operands[0], operands[1]))
1336 rtx base = XEXP (operands[1], 0);
1337 while (GET_CODE (base) != REG)
1338 base = XEXP (base, 0);
1340 if (REGNO (base) == REGNO (operands[0]))
1341 reversed = true;
1342 dead = true;
1344 /* Another reason to do the moves in reversed order is if the first
1345 element of the target register pair is also the second element of
1346 the source register pair. */
1347 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1348 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1349 reversed = true;
1351 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1352 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1354 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1355 if (GET_CODE (EXP) == MEM \
1356 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1357 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1358 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1359 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1360 XEXP (XEXP (EXP, 0), 0), \
1361 REG_NOTES (INSN))
1363 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1364 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1365 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1367 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1368 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1369 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1371 if (fixup[0])
1372 emit_insn (fixup[0]);
1373 if (fixup[1])
1374 emit_insn (fixup[1]);
1376 #undef MAYBE_ADD_REG_INC_NOTE
1379 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1380 through memory plus an extra GR scratch register. Except that you can
1381 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1382 SECONDARY_RELOAD_CLASS, but not both.
1384 We got into problems in the first place by allowing a construct like
1385 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1386 This solution attempts to prevent this situation from occurring. When
1387 we see something like the above, we spill the inner register to memory. */
1389 static rtx
1390 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1392 if (GET_CODE (in) == SUBREG
1393 && GET_MODE (SUBREG_REG (in)) == TImode
1394 && GET_CODE (SUBREG_REG (in)) == REG)
1396 rtx memt = assign_stack_temp (TImode, 16, 0);
1397 emit_move_insn (memt, SUBREG_REG (in));
1398 return adjust_address (memt, mode, 0);
1400 else if (force && GET_CODE (in) == REG)
1402 rtx memx = assign_stack_temp (mode, 16, 0);
1403 emit_move_insn (memx, in);
1404 return memx;
1406 else
1407 return in;
1410 /* Expand the movxf or movrf pattern (MODE says which) with the given
1411 OPERANDS, returning true if the pattern should then invoke
1412 DONE. */
1414 bool
1415 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1417 rtx op0 = operands[0];
1419 if (GET_CODE (op0) == SUBREG)
1420 op0 = SUBREG_REG (op0);
1422 /* We must support XFmode loads into general registers for stdarg/vararg,
1423 unprototyped calls, and a rare case where a long double is passed as
1424 an argument after a float HFA fills the FP registers. We split them into
1425 DImode loads for convenience. We also need to support XFmode stores
1426 for the last case. This case does not happen for stdarg/vararg routines,
1427 because we do a block store to memory of unnamed arguments. */
1429 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1431 rtx out[2];
1433 /* We're hoping to transform everything that deals with XFmode
1434 quantities and GR registers early in the compiler. */
1435 gcc_assert (!no_new_pseudos);
1437 /* Struct to register can just use TImode instead. */
1438 if ((GET_CODE (operands[1]) == SUBREG
1439 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1440 || (GET_CODE (operands[1]) == REG
1441 && GR_REGNO_P (REGNO (operands[1]))))
1443 rtx op1 = operands[1];
1445 if (GET_CODE (op1) == SUBREG)
1446 op1 = SUBREG_REG (op1);
1447 else
1448 op1 = gen_rtx_REG (TImode, REGNO (op1));
1450 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1451 return true;
1454 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1456 /* Don't word-swap when reading in the constant. */
1457 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1458 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1459 0, mode));
1460 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1461 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1462 0, mode));
1463 return true;
1466 /* If the quantity is in a register not known to be GR, spill it. */
1467 if (register_operand (operands[1], mode))
1468 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1470 gcc_assert (GET_CODE (operands[1]) == MEM);
1472 /* Don't word-swap when reading in the value. */
1473 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1474 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1476 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1477 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1478 return true;
1481 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1483 /* We're hoping to transform everything that deals with XFmode
1484 quantities and GR registers early in the compiler. */
1485 gcc_assert (!no_new_pseudos);
1487 /* Op0 can't be a GR_REG here, as that case is handled above.
1488 If op0 is a register, then we spill op1, so that we now have a
1489 MEM operand. This requires creating an XFmode subreg of a TImode reg
1490 to force the spill. */
1491 if (register_operand (operands[0], mode))
1493 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1494 op1 = gen_rtx_SUBREG (mode, op1, 0);
1495 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1498 else
1500 rtx in[2];
1502 gcc_assert (GET_CODE (operands[0]) == MEM);
1504 /* Don't word-swap when writing out the value. */
1505 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1506 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1508 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1509 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1510 return true;
1514 if (!reload_in_progress && !reload_completed)
1516 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1518 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1520 rtx memt, memx, in = operands[1];
1521 if (CONSTANT_P (in))
1522 in = validize_mem (force_const_mem (mode, in));
1523 if (GET_CODE (in) == MEM)
1524 memt = adjust_address (in, TImode, 0);
1525 else
1527 memt = assign_stack_temp (TImode, 16, 0);
1528 memx = adjust_address (memt, mode, 0);
1529 emit_move_insn (memx, in);
1531 emit_move_insn (op0, memt);
1532 return true;
1535 if (!ia64_move_ok (operands[0], operands[1]))
1536 operands[1] = force_reg (mode, operands[1]);
1539 return false;
1542 /* Emit comparison instruction if necessary, returning the expression
1543 that holds the compare result in the proper mode. */
1545 static GTY(()) rtx cmptf_libfunc;
1548 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1550 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1551 rtx cmp;
1553 /* If we have a BImode input, then we already have a compare result, and
1554 do not need to emit another comparison. */
1555 if (GET_MODE (op0) == BImode)
1557 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1558 cmp = op0;
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument that indicates what to do.
     The return value is an integer to be compared against zero.  */
1563 else if (GET_MODE (op0) == TFmode)
1565 enum qfcmp_magic {
1566 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1567 QCMP_UNORD = 2,
1568 QCMP_EQ = 4,
1569 QCMP_LT = 8,
1570 QCMP_GT = 16
1571 } magic;
1572 enum rtx_code ncode;
1573 rtx ret, insns;
1575 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1576 switch (code)
1578 /* 1 = equal, 0 = not equal. Equality operators do
1579 not raise FP_INVALID when given an SNaN operand. */
1580 case EQ: magic = QCMP_EQ; ncode = NE; break;
1581 case NE: magic = QCMP_EQ; ncode = EQ; break;
1582 /* isunordered() from C99. */
1583 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1584 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1585 /* Relational operators raise FP_INVALID when given
1586 an SNaN operand. */
1587 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1588 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1589 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1590 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	/* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
	   Expanders for buneq etc. would have to be added to ia64.md
	   for this to be useful.  */
1594 default: gcc_unreachable ();
1597 start_sequence ();
1599 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1600 op0, TFmode, op1, TFmode,
1601 GEN_INT (magic), DImode);
1602 cmp = gen_reg_rtx (BImode);
1603 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1604 gen_rtx_fmt_ee (ncode, BImode,
1605 ret, const0_rtx)));
1607 insns = get_insns ();
1608 end_sequence ();
1610 emit_libcall_block (insns, cmp, cmp,
1611 gen_rtx_fmt_ee (code, BImode, op0, op1));
1612 code = NE;
1614 else
1616 cmp = gen_reg_rtx (BImode);
1617 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1618 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1619 code = NE;
1622 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
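
/* Example (a sketch, not from the original source) of the mapping above,
   written as the C-level call it corresponds to; the exact _U_Qfcmp
   prototype shown here is assumed, not taken from this file.  For
   "a <= b", MAGIC is QCMP_LT|QCMP_EQ|QCMP_INV and NCODE is NE, so the
   comparison is true iff the library result is nonzero.  */
#if 0
extern long _U_Qfcmp (long double a, long double b, long magic);

static int
example_tf_le (long double a, long double b)
{
  return _U_Qfcmp (a, b, 8 | 4 | 1) != 0;  /* QCMP_LT|QCMP_EQ|QCMP_INV */
}
#endif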
1625 /* Generate an integral vector comparison. Return true if the condition has
1626 been reversed, and so the sense of the comparison should be inverted. */
1628 static bool
1629 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1630 rtx dest, rtx op0, rtx op1)
1632 bool negate = false;
1633 rtx x;
1635 /* Canonicalize the comparison to EQ, GT, GTU. */
1636 switch (code)
1638 case EQ:
1639 case GT:
1640 case GTU:
1641 break;
1643 case NE:
1644 case LE:
1645 case LEU:
1646 code = reverse_condition (code);
1647 negate = true;
1648 break;
1650 case GE:
1651 case GEU:
1652 code = reverse_condition (code);
1653 negate = true;
1654 /* FALLTHRU */
1656 case LT:
1657 case LTU:
1658 code = swap_condition (code);
1659 x = op0, op0 = op1, op1 = x;
1660 break;
1662 default:
1663 gcc_unreachable ();
  /* Unsigned parallel compare is not supported by the hardware.  Play some
     tricks to turn this into a signed comparison.  */
1668 if (code == GTU)
1670 switch (mode)
	case V2SImode:
	  {
	    rtx t1, t2, mask;

	    /* Bias both operands by 0x80000000 so that unsigned order
	       becomes signed order, then use a signed compare.  */
	    mask = GEN_INT (-0x80000000);
	    mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
	    mask = force_reg (V2SImode, mask);

	    t1 = gen_reg_rtx (V2SImode);
	    emit_insn (gen_subv2si3 (t1, op0, mask));

	    t2 = gen_reg_rtx (V2SImode);
	    emit_insn (gen_subv2si3 (t2, op1, mask));

	    op0 = t1;
	    op1 = t2;
	    code = GT;
	  }
	  break;
1698 case V8QImode:
1699 case V4HImode:
1700 /* Perform a parallel unsigned saturating subtraction. */
1701 x = gen_reg_rtx (mode);
1702 emit_insn (gen_rtx_SET (VOIDmode, x,
1703 gen_rtx_US_MINUS (mode, op0, op1)));
1705 code = EQ;
1706 op0 = x;
1707 op1 = CONST0_RTX (mode);
1708 negate = !negate;
1709 break;
1711 default:
1712 gcc_unreachable ();
1716 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1717 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1719 return negate;
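
/* Illustration (a sketch, not from the original source) of the V8QI/V4HI
   GTU trick above: the hardware has a saturating unsigned subtract, and
   (a -us b) is zero exactly when a <=u b.  So the emitted compare is
   ((a -us b) == 0), i.e. a <=u b, and flipping NEGATE tells the caller to
   invert it, yielding the requested a >u b.  */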
1722 /* Emit an integral vector conditional move. */
1724 void
1725 ia64_expand_vecint_cmov (rtx operands[])
1727 enum machine_mode mode = GET_MODE (operands[0]);
1728 enum rtx_code code = GET_CODE (operands[3]);
1729 bool negate;
1730 rtx cmp, x, ot, of;
1732 cmp = gen_reg_rtx (mode);
1733 negate = ia64_expand_vecint_compare (code, mode, cmp,
1734 operands[4], operands[5]);
1736 ot = operands[1+negate];
1737 of = operands[2-negate];
1739 if (ot == CONST0_RTX (mode))
1741 if (of == CONST0_RTX (mode))
1743 emit_move_insn (operands[0], ot);
1744 return;
1747 x = gen_rtx_NOT (mode, cmp);
1748 x = gen_rtx_AND (mode, x, of);
1749 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1751 else if (of == CONST0_RTX (mode))
1753 x = gen_rtx_AND (mode, cmp, ot);
1754 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1756 else
1758 rtx t, f;
1760 t = gen_reg_rtx (mode);
1761 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1762 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1764 f = gen_reg_rtx (mode);
1765 x = gen_rtx_NOT (mode, cmp);
1766 x = gen_rtx_AND (mode, x, operands[2-negate]);
1767 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1769 x = gen_rtx_IOR (mode, t, f);
1770 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1774 /* Emit an integral vector min or max operation. Return true if all done. */
1776 bool
1777 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1778 rtx operands[])
1780 rtx xops[6];
1782 /* These four combinations are supported directly. */
1783 if (mode == V8QImode && (code == UMIN || code == UMAX))
1784 return false;
1785 if (mode == V4HImode && (code == SMIN || code == SMAX))
1786 return false;
1788 /* This combination can be implemented with only saturating subtraction. */
1789 if (mode == V4HImode && code == UMAX)
1791 rtx x, tmp = gen_reg_rtx (mode);
1793 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1794 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1796 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1797 return true;
1800 /* Everything else implemented via vector comparisons. */
1801 xops[0] = operands[0];
1802 xops[4] = xops[1] = operands[1];
1803 xops[5] = xops[2] = operands[2];
1805 switch (code)
1807 case UMIN:
1808 code = LTU;
1809 break;
1810 case UMAX:
1811 code = GTU;
1812 break;
1813 case SMIN:
1814 code = LT;
1815 break;
1816 case SMAX:
1817 code = GT;
1818 break;
1819 default:
1820 gcc_unreachable ();
1822 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1824 ia64_expand_vecint_cmov (xops);
1825 return true;
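
/* Worked example (a sketch, not from the original source) of the V4HI
   UMAX case above, which needs only the saturating subtraction:
   umax (a, b) == (a -us b) + b.  For a = 9, b = 5 this is 4 + 5 = 9;
   for a = 5, b = 9 it is 0 + 9 = 9.  */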
1828 /* Emit an integral vector widening sum operations. */
1830 void
1831 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1833 rtx l, h, x, s;
1834 enum machine_mode wmode, mode;
1835 rtx (*unpack_l) (rtx, rtx, rtx);
1836 rtx (*unpack_h) (rtx, rtx, rtx);
1837 rtx (*plus) (rtx, rtx, rtx);
1839 wmode = GET_MODE (operands[0]);
1840 mode = GET_MODE (operands[1]);
1842 switch (mode)
1844 case V8QImode:
1845 unpack_l = gen_unpack1_l;
1846 unpack_h = gen_unpack1_h;
1847 plus = gen_addv4hi3;
1848 break;
1849 case V4HImode:
1850 unpack_l = gen_unpack2_l;
1851 unpack_h = gen_unpack2_h;
1852 plus = gen_addv2si3;
1853 break;
1854 default:
1855 gcc_unreachable ();
1858 /* Fill in x with the sign extension of each element in op1. */
1859 if (unsignedp)
1860 x = CONST0_RTX (mode);
1861 else
1863 bool neg;
1865 x = gen_reg_rtx (mode);
1867 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1868 CONST0_RTX (mode));
1869 gcc_assert (!neg);
1872 l = gen_reg_rtx (wmode);
1873 h = gen_reg_rtx (wmode);
1874 s = gen_reg_rtx (wmode);
1876 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1877 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1878 emit_insn (plus (s, l, operands[2]));
1879 emit_insn (plus (operands[0], h, s));
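/* A rough scalar model of the widening-sum expansion above, for the
   V8QImode case.  The lane each narrow element lands in follows the
   unpack1_l/unpack1_h interleave and is left abstract here (lane_of is
   purely hypothetical); the point is that every element of op1 is
   widened exactly once, using X as its zero- or sign-extension, and
   added into the accumulator:

     // acc: 4 x 16-bit lanes (= op2 on entry), a: 8 x 8-bit elements.
     for (i = 0; i < 8; i++)
       acc[lane_of (i)] += a[i];

   so operands[0] receives op2 plus the widened sum of op1's elements.  */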
1882 /* Emit a signed or unsigned V8QI dot product operation. */
1884 void
1885 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1887 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1889 /* Fill in x1 and x2 with the sign extension of each element. */
1890 if (unsignedp)
1891 x1 = x2 = CONST0_RTX (V8QImode);
1892 else
1894 bool neg;
1896 x1 = gen_reg_rtx (V8QImode);
1897 x2 = gen_reg_rtx (V8QImode);
1899 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1900 CONST0_RTX (V8QImode));
1901 gcc_assert (!neg);
1902 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1903 CONST0_RTX (V8QImode));
1904 gcc_assert (!neg);
1907 l1 = gen_reg_rtx (V4HImode);
1908 l2 = gen_reg_rtx (V4HImode);
1909 h1 = gen_reg_rtx (V4HImode);
1910 h2 = gen_reg_rtx (V4HImode);
1912 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1913 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1914 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1915 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1917 p1 = gen_reg_rtx (V2SImode);
1918 p2 = gen_reg_rtx (V2SImode);
1919 p3 = gen_reg_rtx (V2SImode);
1920 p4 = gen_reg_rtx (V2SImode);
1921 emit_insn (gen_pmpy2_r (p1, l1, l2));
1922 emit_insn (gen_pmpy2_l (p2, l1, l2));
1923 emit_insn (gen_pmpy2_r (p3, h1, h2));
1924 emit_insn (gen_pmpy2_l (p4, h1, h2));
1926 s1 = gen_reg_rtx (V2SImode);
1927 s2 = gen_reg_rtx (V2SImode);
1928 s3 = gen_reg_rtx (V2SImode);
1929 emit_insn (gen_addv2si3 (s1, p1, p2));
1930 emit_insn (gen_addv2si3 (s2, p3, p4));
1931 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1932 emit_insn (gen_addv2si3 (operands[0], s2, s3));
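/* A scalar model of the V8QI dot-product expansion above.  The partial
   sums are distributed over the two SImode lanes of the result according
   to the unpack/pmpy pairing (not modelled here); summed across both
   lanes the result is the familiar widening multiply-accumulate:

     // a, b: 8 elements each (signed or unsigned per UNSIGNEDP);
     // acc is the sum of the two lanes of operands[3].
     int32_t dot = acc;
     for (i = 0; i < 8; i++)
       dot += a[i] * b[i];

   which is what the four pmpy2 products and the addv2si3 sums compute.  */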
1935 /* Emit the appropriate sequence for a call. */
1937 void
1938 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1939 int sibcall_p)
1941 rtx insn, b0;
1943 addr = XEXP (addr, 0);
1944 addr = convert_memory_address (DImode, addr);
1945 b0 = gen_rtx_REG (DImode, R_BR (0));
1947 /* ??? Should do this for functions known to bind local too. */
1948 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1950 if (sibcall_p)
1951 insn = gen_sibcall_nogp (addr);
1952 else if (! retval)
1953 insn = gen_call_nogp (addr, b0);
1954 else
1955 insn = gen_call_value_nogp (retval, addr, b0);
1956 insn = emit_call_insn (insn);
1958 else
1960 if (sibcall_p)
1961 insn = gen_sibcall_gp (addr);
1962 else if (! retval)
1963 insn = gen_call_gp (addr, b0);
1964 else
1965 insn = gen_call_value_gp (retval, addr, b0);
1966 insn = emit_call_insn (insn);
1968 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1971 if (sibcall_p)
1972 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1975 void
1976 ia64_reload_gp (void)
1978 rtx tmp;
1980 if (current_frame_info.reg_save_gp)
1981 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1982 else
1984 HOST_WIDE_INT offset;
1986 offset = (current_frame_info.spill_cfa_off
1987 + current_frame_info.spill_size);
1988 if (frame_pointer_needed)
1990 tmp = hard_frame_pointer_rtx;
1991 offset = -offset;
1993 else
1995 tmp = stack_pointer_rtx;
1996 offset = current_frame_info.total_size - offset;
1999 if (CONST_OK_FOR_I (offset))
2000 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2001 tmp, GEN_INT (offset)));
2002 else
2004 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
2005 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2006 pic_offset_table_rtx, tmp));
2009 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2012 emit_move_insn (pic_offset_table_rtx, tmp);
2015 void
2016 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2017 rtx scratch_b, int noreturn_p, int sibcall_p)
2019 rtx insn;
2020 bool is_desc = false;
2022 /* If we find we're calling through a register, then we're actually
2023 calling through a descriptor, so load up the values. */
2024 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2026 rtx tmp;
2027 bool addr_dead_p;
2029 /* ??? We are currently constrained to *not* use peep2, because
2030 we can legitimately change the global lifetime of the GP
2031 (in the form of killing where previously live). This is
2032 because a call through a descriptor doesn't use the previous
2033 value of the GP, while a direct call does, and we do not
2034 commit to either form until the split here.
2036 That said, this means that we lack precise life info for
2037 whether ADDR is dead after this call. This is not terribly
2038 important, since we can fix things up essentially for free
2039 with the POST_DEC below, but it's nice to not use it when we
2040 can immediately tell it's not necessary. */
2041 addr_dead_p = ((noreturn_p || sibcall_p
2042 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2043 REGNO (addr)))
2044 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2046 /* Load the code address into scratch_b. */
2047 tmp = gen_rtx_POST_INC (Pmode, addr);
2048 tmp = gen_rtx_MEM (Pmode, tmp);
2049 emit_move_insn (scratch_r, tmp);
2050 emit_move_insn (scratch_b, scratch_r);
2052 /* Load the GP address. If ADDR is not dead here, then we must
2053 revert the change made above via the POST_INCREMENT. */
2054 if (!addr_dead_p)
2055 tmp = gen_rtx_POST_DEC (Pmode, addr);
2056 else
2057 tmp = addr;
2058 tmp = gen_rtx_MEM (Pmode, tmp);
2059 emit_move_insn (pic_offset_table_rtx, tmp);
2061 is_desc = true;
2062 addr = scratch_b;
2065 if (sibcall_p)
2066 insn = gen_sibcall_nogp (addr);
2067 else if (retval)
2068 insn = gen_call_value_nogp (retval, addr, retaddr);
2069 else
2070 insn = gen_call_nogp (addr, retaddr);
2071 emit_call_insn (insn);
2073 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2074 ia64_reload_gp ();
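/* The descriptor walked above is the usual IA-64 function descriptor:
   two 8-byte words, the entry address followed by the callee's gp.  A
   sketch of the layout (the struct name is purely illustrative):

     struct ia64_fdesc
     {
       uint64_t entry;   // loaded via the POST_INC into scratch_b above
       uint64_t gp;      // loaded into pic_offset_table_rtx
     };

   The POST_INC leaves ADDR pointing at the gp word; the POST_DEC puts it
   back when ADDR may still be live afterward.  */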
2077 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2079 This differs from the generic code in that we know about the zero-extending
2080 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2081 also know that ld.acq+cmpxchg.rel equals a full barrier.
2083 The loop we want to generate looks like
2085 cmp_reg = mem;
2086 label:
2087 old_reg = cmp_reg;
2088 new_reg = cmp_reg op val;
2089 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2090 if (cmp_reg != old_reg)
2091 goto label;
2093 Note that we only do the plain load from memory once. Subsequent
2094 iterations use the value loaded by the compare-and-swap pattern. */
2096 void
2097 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2098 rtx old_dst, rtx new_dst)
2100 enum machine_mode mode = GET_MODE (mem);
2101 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2102 enum insn_code icode;
2104 /* Special case for using fetchadd. */
2105 if ((mode == SImode || mode == DImode)
2106 && (code == PLUS || code == MINUS)
2107 && fetchadd_operand (val, mode))
2109 if (code == MINUS)
2110 val = GEN_INT (-INTVAL (val));
2112 if (!old_dst)
2113 old_dst = gen_reg_rtx (mode);
2115 emit_insn (gen_memory_barrier ());
2117 if (mode == SImode)
2118 icode = CODE_FOR_fetchadd_acq_si;
2119 else
2120 icode = CODE_FOR_fetchadd_acq_di;
2121 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2123 if (new_dst)
2125 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2126 true, OPTAB_WIDEN);
2127 if (new_reg != new_dst)
2128 emit_move_insn (new_dst, new_reg);
2130 return;
2133 /* Because of the volatile mem read, we get an ld.acq, which is the
2134 front half of the full barrier. The end half is the cmpxchg.rel. */
2135 gcc_assert (MEM_VOLATILE_P (mem));
2137 old_reg = gen_reg_rtx (DImode);
2138 cmp_reg = gen_reg_rtx (DImode);
2139 label = gen_label_rtx ();
2141 if (mode != DImode)
2143 val = simplify_gen_subreg (DImode, val, mode, 0);
2144 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2146 else
2147 emit_move_insn (cmp_reg, mem);
2149 emit_label (label);
2151 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2152 emit_move_insn (old_reg, cmp_reg);
2153 emit_move_insn (ar_ccv, cmp_reg);
2155 if (old_dst)
2156 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2158 new_reg = cmp_reg;
2159 if (code == NOT)
2161 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2162 code = AND;
2164 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2165 true, OPTAB_DIRECT);
2167 if (mode != DImode)
2168 new_reg = gen_lowpart (mode, new_reg);
2169 if (new_dst)
2170 emit_move_insn (new_dst, new_reg);
2172 switch (mode)
2174 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2175 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2176 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2177 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2178 default:
2179 gcc_unreachable ();
2182 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2184 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
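/* As a concrete sketch of the fetchadd fast path above, a DImode atomic
   add whose constant satisfies fetchadd_operand (say +8) expands to
   roughly:

     emit_insn (gen_memory_barrier ());
     emit_insn (gen_fetchadd_acq_di (old_dst, mem, val));
     // and, if NEW_DST was requested, new = old_dst + val.

   Everything else falls through to the ar.ccv/cmpxchg.rel loop described
   in the comment before the function.  */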
2187 /* Begin the assembly file. */
2189 static void
2190 ia64_file_start (void)
2192 /* Variable tracking should be run after all optimizations which change order
2193 of insns. It also needs a valid CFG. This can't be done in
2194 ia64_override_options, because flag_var_tracking is finalized after
2195 that. */
2196 ia64_flag_var_tracking = flag_var_tracking;
2197 flag_var_tracking = 0;
2199 default_file_start ();
2200 emit_safe_across_calls ();
2203 void
2204 emit_safe_across_calls (void)
2206 unsigned int rs, re;
2207 int out_state;
2209 rs = 1;
2210 out_state = 0;
2211 while (1)
2213 while (rs < 64 && call_used_regs[PR_REG (rs)])
2214 rs++;
2215 if (rs >= 64)
2216 break;
2217 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2218 continue;
2219 if (out_state == 0)
2221 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2222 out_state = 1;
2224 else
2225 fputc (',', asm_out_file);
2226 if (re == rs + 1)
2227 fprintf (asm_out_file, "p%u", rs);
2228 else
2229 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2230 rs = re + 1;
2232 if (out_state)
2233 fputc ('\n', asm_out_file);
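/* As an example of the output, if (hypothetically) p1-p5 and p16-p63 were
   the only predicate registers not in call_used_regs, the loop above would
   emit:

     .pred.safe_across_calls p1-p5,p16-p63

   A single-register range prints as plain "pN", and nothing is emitted at
   all when every predicate register is call-used.  */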
2236 /* Globalize a declaration. */
2238 static void
2239 ia64_globalize_decl_name (FILE * stream, tree decl)
2241 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2242 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2243 if (version_attr)
2245 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2246 const char *p = TREE_STRING_POINTER (v);
2247 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2249 targetm.asm_out.globalize_label (stream, name);
2250 if (TREE_CODE (decl) == FUNCTION_DECL)
2251 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
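/* As an example, a (hypothetical) function foo declared with
   __attribute__ ((version_id ("1.1"))) gets

     .alias foo#, "foo{1.1}"

   followed by the usual globalization and, for functions, the .type
   "function" directive.  */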
2254 /* Helper function for ia64_compute_frame_size: find an appropriate general
2255 register to spill some special register to. The GR0-GR31 bits already
2256 allocated by this routine are tracked in current_frame_info.gr_used_mask.
2257 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2259 static int
2260 find_gr_spill (int try_locals)
2262 int regno;
2264 /* If this is a leaf function, first try an otherwise unused
2265 call-clobbered register. */
2266 if (current_function_is_leaf)
2268 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2269 if (! regs_ever_live[regno]
2270 && call_used_regs[regno]
2271 && ! fixed_regs[regno]
2272 && ! global_regs[regno]
2273 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2275 current_frame_info.gr_used_mask |= 1 << regno;
2276 return regno;
2280 if (try_locals)
2282 regno = current_frame_info.n_local_regs;
2283 /* If there is a frame pointer, then we can't use loc79, because
2284 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2285 reg_name switching code in ia64_expand_prologue. */
2286 if (regno < (80 - frame_pointer_needed))
2288 current_frame_info.n_local_regs = regno + 1;
2289 return LOC_REG (0) + regno;
2293 /* Failed to find a general register to spill to. Must use stack. */
2294 return 0;
2297 /* In order to make for nice schedules, we try to allocate every temporary
2298 to a different register. We must of course stay away from call-saved,
2299 fixed, and global registers. We must also stay away from registers
2300 allocated in current_frame_info.gr_used_mask, since those include regs
2301 used all through the prologue.
2303 Any register allocated here must be used immediately. The idea is to
2304 aid scheduling, not to solve data flow problems. */
2306 static int last_scratch_gr_reg;
2308 static int
2309 next_scratch_gr_reg (void)
2311 int i, regno;
2313 for (i = 0; i < 32; ++i)
2315 regno = (last_scratch_gr_reg + i + 1) & 31;
2316 if (call_used_regs[regno]
2317 && ! fixed_regs[regno]
2318 && ! global_regs[regno]
2319 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2321 last_scratch_gr_reg = regno;
2322 return regno;
2326 /* There must be _something_ available. */
2327 gcc_unreachable ();
2330 /* Helper function for ia64_compute_frame_size, called through
2331 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2333 static void
2334 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2336 unsigned int regno = REGNO (reg);
2337 if (regno < 32)
2339 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2340 for (i = 0; i < n; ++i)
2341 current_frame_info.gr_used_mask |= 1 << (regno + i);
2345 /* Returns the number of bytes offset between the frame pointer and the stack
2346 pointer for the current function. SIZE is the number of bytes of space
2347 needed for local variables. */
2349 static void
2350 ia64_compute_frame_size (HOST_WIDE_INT size)
2352 HOST_WIDE_INT total_size;
2353 HOST_WIDE_INT spill_size = 0;
2354 HOST_WIDE_INT extra_spill_size = 0;
2355 HOST_WIDE_INT pretend_args_size;
2356 HARD_REG_SET mask;
2357 int n_spilled = 0;
2358 int spilled_gr_p = 0;
2359 int spilled_fr_p = 0;
2360 unsigned int regno;
2361 int i;
2363 if (current_frame_info.initialized)
2364 return;
2366 memset (&current_frame_info, 0, sizeof current_frame_info);
2367 CLEAR_HARD_REG_SET (mask);
2369 /* Don't allocate scratches to the return register. */
2370 diddle_return_value (mark_reg_gr_used_mask, NULL);
2372 /* Don't allocate scratches to the EH scratch registers. */
2373 if (cfun->machine->ia64_eh_epilogue_sp)
2374 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2375 if (cfun->machine->ia64_eh_epilogue_bsp)
2376 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2378 /* Find the size of the register stack frame. We have only 80 local
2379 registers, because we reserve 8 for the inputs and 8 for the
2380 outputs. */
2382 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2383 since we'll be adjusting that down later. */
2384 regno = LOC_REG (78) + ! frame_pointer_needed;
2385 for (; regno >= LOC_REG (0); regno--)
2386 if (regs_ever_live[regno])
2387 break;
2388 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2390 /* For functions marked with the syscall_linkage attribute, we must mark
2391 all eight input registers as in use, so that locals aren't visible to
2392 the caller. */
2394 if (cfun->machine->n_varargs > 0
2395 || lookup_attribute ("syscall_linkage",
2396 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2397 current_frame_info.n_input_regs = 8;
2398 else
2400 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2401 if (regs_ever_live[regno])
2402 break;
2403 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2406 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2407 if (regs_ever_live[regno])
2408 break;
2409 i = regno - OUT_REG (0) + 1;
2411 #ifndef PROFILE_HOOK
2412 /* When -p profiling, we need one output register for the mcount argument.
2413 Likewise for -a profiling for the bb_init_func argument. For -ax
2414 profiling, we need two output registers for the two bb_init_trace_func
2415 arguments. */
2416 if (current_function_profile)
2417 i = MAX (i, 1);
2418 #endif
2419 current_frame_info.n_output_regs = i;
2421 /* ??? No rotating register support yet. */
2422 current_frame_info.n_rotate_regs = 0;
2424 /* Discover which registers need spilling, and how much room that
2425 will take. Begin with floating point and general registers,
2426 which will always wind up on the stack. */
2428 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2429 if (regs_ever_live[regno] && ! call_used_regs[regno])
2431 SET_HARD_REG_BIT (mask, regno);
2432 spill_size += 16;
2433 n_spilled += 1;
2434 spilled_fr_p = 1;
2437 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2438 if (regs_ever_live[regno] && ! call_used_regs[regno])
2440 SET_HARD_REG_BIT (mask, regno);
2441 spill_size += 8;
2442 n_spilled += 1;
2443 spilled_gr_p = 1;
2446 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2447 if (regs_ever_live[regno] && ! call_used_regs[regno])
2449 SET_HARD_REG_BIT (mask, regno);
2450 spill_size += 8;
2451 n_spilled += 1;
2454 /* Now come all special registers that might get saved in other
2455 general registers. */
2457 if (frame_pointer_needed)
2459 current_frame_info.reg_fp = find_gr_spill (1);
2460 /* If we did not get a register, then we take LOC79. This is guaranteed
2461 to be free, even if regs_ever_live is already set, because this is
2462 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2463 as we don't count loc79 above. */
2464 if (current_frame_info.reg_fp == 0)
2466 current_frame_info.reg_fp = LOC_REG (79);
2467 current_frame_info.n_local_regs++;
2471 if (! current_function_is_leaf)
2473 /* Emit a save of BR0 if we call other functions. Do this even
2474 if this function doesn't return, as EH depends on this to be
2475 able to unwind the stack. */
2476 SET_HARD_REG_BIT (mask, BR_REG (0));
2478 current_frame_info.reg_save_b0 = find_gr_spill (1);
2479 if (current_frame_info.reg_save_b0 == 0)
2481 extra_spill_size += 8;
2482 n_spilled += 1;
2485 /* Similarly for ar.pfs. */
2486 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2487 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2488 if (current_frame_info.reg_save_ar_pfs == 0)
2490 extra_spill_size += 8;
2491 n_spilled += 1;
2494 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2495 registers are clobbered, so we fall back to the stack. */
2496 current_frame_info.reg_save_gp
2497 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2498 if (current_frame_info.reg_save_gp == 0)
2500 SET_HARD_REG_BIT (mask, GR_REG (1));
2501 spill_size += 8;
2502 n_spilled += 1;
2505 else
2507 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2509 SET_HARD_REG_BIT (mask, BR_REG (0));
2510 extra_spill_size += 8;
2511 n_spilled += 1;
2514 if (regs_ever_live[AR_PFS_REGNUM])
2516 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2517 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2518 if (current_frame_info.reg_save_ar_pfs == 0)
2520 extra_spill_size += 8;
2521 n_spilled += 1;
2526 /* Unwind descriptor hackery: things are most efficient if we allocate
2527 consecutive GR save registers for RP, PFS, FP in that order. However,
2528 it is absolutely critical that FP get the only hard register that's
2529 guaranteed to be free, so we allocate it first. If all three happen
2530 to have been allocated hard regs, and they are consecutive, rearrange them
2531 into the preferred order now. */
2532 if (current_frame_info.reg_fp != 0
2533 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2534 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2536 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2537 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2538 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2541 /* See if we need to store the predicate register block. */
2542 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2543 if (regs_ever_live[regno] && ! call_used_regs[regno])
2544 break;
2545 if (regno <= PR_REG (63))
2547 SET_HARD_REG_BIT (mask, PR_REG (0));
2548 current_frame_info.reg_save_pr = find_gr_spill (1);
2549 if (current_frame_info.reg_save_pr == 0)
2551 extra_spill_size += 8;
2552 n_spilled += 1;
2555 /* ??? Mark them all as used so that register renaming and such
2556 are free to use them. */
2557 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2558 regs_ever_live[regno] = 1;
2561 /* If we're forced to use st8.spill, we're forced to save and restore
2562 ar.unat as well. The check for existing liveness allows inline asm
2563 to touch ar.unat. */
2564 if (spilled_gr_p || cfun->machine->n_varargs
2565 || regs_ever_live[AR_UNAT_REGNUM])
2567 regs_ever_live[AR_UNAT_REGNUM] = 1;
2568 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2569 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2570 if (current_frame_info.reg_save_ar_unat == 0)
2572 extra_spill_size += 8;
2573 n_spilled += 1;
2577 if (regs_ever_live[AR_LC_REGNUM])
2579 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2580 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2581 if (current_frame_info.reg_save_ar_lc == 0)
2583 extra_spill_size += 8;
2584 n_spilled += 1;
2588 /* If we have an odd number of words of pretend arguments written to
2589 the stack, then the FR save area will be unaligned. We round the
2590 size of this area up to keep things 16 byte aligned. */
2591 if (spilled_fr_p)
2592 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2593 else
2594 pretend_args_size = current_function_pretend_args_size;
2596 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2597 + current_function_outgoing_args_size);
2598 total_size = IA64_STACK_ALIGN (total_size);
2600 /* We always use the 16-byte scratch area provided by the caller, but
2601 if we are a leaf function, there's no one to which we need to provide
2602 a scratch area. */
2603 if (current_function_is_leaf)
2604 total_size = MAX (0, total_size - 16);
2606 current_frame_info.total_size = total_size;
2607 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2608 current_frame_info.spill_size = spill_size;
2609 current_frame_info.extra_spill_size = extra_spill_size;
2610 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2611 current_frame_info.n_spilled = n_spilled;
2612 current_frame_info.initialized = reload_completed;
2615 /* Compute the initial difference between the specified pair of registers. */
2617 HOST_WIDE_INT
2618 ia64_initial_elimination_offset (int from, int to)
2620 HOST_WIDE_INT offset;
2622 ia64_compute_frame_size (get_frame_size ());
2623 switch (from)
2625 case FRAME_POINTER_REGNUM:
2626 switch (to)
2628 case HARD_FRAME_POINTER_REGNUM:
2629 if (current_function_is_leaf)
2630 offset = -current_frame_info.total_size;
2631 else
2632 offset = -(current_frame_info.total_size
2633 - current_function_outgoing_args_size - 16);
2634 break;
2636 case STACK_POINTER_REGNUM:
2637 if (current_function_is_leaf)
2638 offset = 0;
2639 else
2640 offset = 16 + current_function_outgoing_args_size;
2641 break;
2643 default:
2644 gcc_unreachable ();
2646 break;
2648 case ARG_POINTER_REGNUM:
2649 /* Arguments start above the 16 byte save area, unless stdarg,
2650 in which case we store through the 16 byte save area. */
2651 switch (to)
2653 case HARD_FRAME_POINTER_REGNUM:
2654 offset = 16 - current_function_pretend_args_size;
2655 break;
2657 case STACK_POINTER_REGNUM:
2658 offset = (current_frame_info.total_size
2659 + 16 - current_function_pretend_args_size);
2660 break;
2662 default:
2663 gcc_unreachable ();
2665 break;
2667 default:
2668 gcc_unreachable ();
2671 return offset;
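/* A worked example of the cases above, for a hypothetical non-leaf
   function with total_size == 160, outgoing args size == 32 and no
   pretend args:

     FRAME_POINTER -> HARD_FRAME_POINTER:  -(160 - 32 - 16) == -112
     FRAME_POINTER -> STACK_POINTER:        16 + 32         ==   48
     ARG_POINTER   -> HARD_FRAME_POINTER:   16 - 0          ==   16
     ARG_POINTER   -> STACK_POINTER:        160 + 16 - 0    ==  176

   For a leaf function the first two collapse to -total_size and 0.  */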
2674 /* If there are more than a trivial number of register spills, we use
2675 two interleaved iterators so that we can get two memory references
2676 per insn group.
2678 In order to simplify things in the prologue and epilogue expanders,
2679 we use helper functions to fix up the memory references after the
2680 fact with the appropriate offsets to a POST_MODIFY memory mode.
2681 The following data structure tracks the state of the two iterators
2682 while insns are being emitted. */
2684 struct spill_fill_data
2686 rtx init_after; /* point at which to emit initializations */
2687 rtx init_reg[2]; /* initial base register */
2688 rtx iter_reg[2]; /* the iterator registers */
2689 rtx *prev_addr[2]; /* address of last memory use */
2690 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2691 HOST_WIDE_INT prev_off[2]; /* last offset */
2692 int n_iter; /* number of iterators in use */
2693 int next_iter; /* next iterator to use */
2694 unsigned int save_gr_used_mask;
2697 static struct spill_fill_data spill_fill_data;
2699 static void
2700 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2702 int i;
2704 spill_fill_data.init_after = get_last_insn ();
2705 spill_fill_data.init_reg[0] = init_reg;
2706 spill_fill_data.init_reg[1] = init_reg;
2707 spill_fill_data.prev_addr[0] = NULL;
2708 spill_fill_data.prev_addr[1] = NULL;
2709 spill_fill_data.prev_insn[0] = NULL;
2710 spill_fill_data.prev_insn[1] = NULL;
2711 spill_fill_data.prev_off[0] = cfa_off;
2712 spill_fill_data.prev_off[1] = cfa_off;
2713 spill_fill_data.next_iter = 0;
2714 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2716 spill_fill_data.n_iter = 1 + (n_spills > 2);
2717 for (i = 0; i < spill_fill_data.n_iter; ++i)
2719 int regno = next_scratch_gr_reg ();
2720 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2721 current_frame_info.gr_used_mask |= 1 << regno;
2725 static void
2726 finish_spill_pointers (void)
2728 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2731 static rtx
2732 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2734 int iter = spill_fill_data.next_iter;
2735 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2736 rtx disp_rtx = GEN_INT (disp);
2737 rtx mem;
2739 if (spill_fill_data.prev_addr[iter])
2741 if (CONST_OK_FOR_N (disp))
2743 *spill_fill_data.prev_addr[iter]
2744 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2745 gen_rtx_PLUS (DImode,
2746 spill_fill_data.iter_reg[iter],
2747 disp_rtx));
2748 REG_NOTES (spill_fill_data.prev_insn[iter])
2749 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2750 REG_NOTES (spill_fill_data.prev_insn[iter]));
2752 else
2754 /* ??? Could use register post_modify for loads. */
2755 if (! CONST_OK_FOR_I (disp))
2757 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2758 emit_move_insn (tmp, disp_rtx);
2759 disp_rtx = tmp;
2761 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2762 spill_fill_data.iter_reg[iter], disp_rtx));
2765 /* Micro-optimization: if we've created a frame pointer, it's at
2766 CFA 0, which may allow the real iterator to be initialized lower,
2767 slightly increasing parallelism. Also, if there are few saves
2768 it may eliminate the iterator entirely. */
2769 else if (disp == 0
2770 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2771 && frame_pointer_needed)
2773 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2774 set_mem_alias_set (mem, get_varargs_alias_set ());
2775 return mem;
2777 else
2779 rtx seq, insn;
2781 if (disp == 0)
2782 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2783 spill_fill_data.init_reg[iter]);
2784 else
2786 start_sequence ();
2788 if (! CONST_OK_FOR_I (disp))
2790 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2791 emit_move_insn (tmp, disp_rtx);
2792 disp_rtx = tmp;
2795 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2796 spill_fill_data.init_reg[iter],
2797 disp_rtx));
2799 seq = get_insns ();
2800 end_sequence ();
2803 /* Careful for being the first insn in a sequence. */
2804 if (spill_fill_data.init_after)
2805 insn = emit_insn_after (seq, spill_fill_data.init_after);
2806 else
2808 rtx first = get_insns ();
2809 if (first)
2810 insn = emit_insn_before (seq, first);
2811 else
2812 insn = emit_insn (seq);
2814 spill_fill_data.init_after = insn;
2816 /* If DISP is 0, we may or may not have a further adjustment
2817 afterward. If we do, then the load/store insn may be modified
2818 to be a post-modify. If we don't, then this copy may be
2819 eliminated by copyprop_hardreg_forward, which would make this
2820 insn garbage and run afoul of the sanity check in
2821 propagate_one_insn. So mark this insn as legal to delete. */
2822 if (disp == 0)
2823 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2824 REG_NOTES (insn));
2827 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2829 /* ??? Not all of the spills are for varargs, but some of them are.
2830 The rest of the spills belong in an alias set of their own. But
2831 it doesn't actually hurt to include them here. */
2832 set_mem_alias_set (mem, get_varargs_alias_set ());
2834 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2835 spill_fill_data.prev_off[iter] = cfa_off;
2837 if (++iter >= spill_fill_data.n_iter)
2838 iter = 0;
2839 spill_fill_data.next_iter = iter;
2841 return mem;
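/* A sketch of how two consecutive 8-byte spills through one iterator come
   out of the function above, with hypothetical offsets:

     call 1, cfa_off == 24:  emits  add iter = init_reg + (prev_off - 24)
                             and returns the plain address [iter]
     call 2, cfa_off == 16:  disp == 24 - 16 == 8, which CONST_OK_FOR_N
                             accepts, so the previous MEM is rewritten into
                             [iter], post-modify +8, and no separate add is
                             needed for the second slot.  */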
2844 static void
2845 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2846 rtx frame_reg)
2848 int iter = spill_fill_data.next_iter;
2849 rtx mem, insn;
2851 mem = spill_restore_mem (reg, cfa_off);
2852 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2853 spill_fill_data.prev_insn[iter] = insn;
2855 if (frame_reg)
2857 rtx base;
2858 HOST_WIDE_INT off;
2860 RTX_FRAME_RELATED_P (insn) = 1;
2862 /* Don't even pretend that the unwind code can intuit its way
2863 through a pair of interleaved post_modify iterators. Just
2864 provide the correct answer. */
2866 if (frame_pointer_needed)
2868 base = hard_frame_pointer_rtx;
2869 off = - cfa_off;
2871 else
2873 base = stack_pointer_rtx;
2874 off = current_frame_info.total_size - cfa_off;
2877 REG_NOTES (insn)
2878 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2879 gen_rtx_SET (VOIDmode,
2880 gen_rtx_MEM (GET_MODE (reg),
2881 plus_constant (base, off)),
2882 frame_reg),
2883 REG_NOTES (insn));
2887 static void
2888 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2890 int iter = spill_fill_data.next_iter;
2891 rtx insn;
2893 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2894 GEN_INT (cfa_off)));
2895 spill_fill_data.prev_insn[iter] = insn;
2898 /* Wrapper functions that discard the CONST_INT spill offset. These
2899 exist so that we can give gr_spill/gr_fill the offset they need and
2900 use a consistent function interface. */
2902 static rtx
2903 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2905 return gen_movdi (dest, src);
2908 static rtx
2909 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2911 return gen_fr_spill (dest, src);
2914 static rtx
2915 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2917 return gen_fr_restore (dest, src);
2920 /* Called after register allocation to add any instructions needed for the
2921 prologue. Using a prologue insn is favored compared to putting all of the
2922 instructions in output_function_prologue(), since it allows the scheduler
2923 to intermix instructions with the saves of the call-saved registers. In
2924 some cases, it might be necessary to emit a barrier instruction as the last
2925 insn to prevent such scheduling.
2927 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2928 so that the debug info generation code can handle them properly.
2930 The register save area is laid out like so:
2931 cfa+16
2932 [ varargs spill area ]
2933 [ fr register spill area ]
2934 [ br register spill area ]
2935 [ ar register spill area ]
2936 [ pr register spill area ]
2937 [ gr register spill area ] */
2939 /* ??? We get inefficient code when the frame size is larger than can fit in
2940 an adds instruction. */
2942 void
2943 ia64_expand_prologue (void)
2945 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2946 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2947 rtx reg, alt_reg;
2949 ia64_compute_frame_size (get_frame_size ());
2950 last_scratch_gr_reg = 15;
2952 /* If there is no epilogue, then we don't need some prologue insns.
2953 We need to avoid emitting the dead prologue insns, because flow
2954 will complain about them. */
2955 if (optimize)
2957 edge e;
2958 edge_iterator ei;
2960 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2961 if ((e->flags & EDGE_FAKE) == 0
2962 && (e->flags & EDGE_FALLTHRU) != 0)
2963 break;
2964 epilogue_p = (e != NULL);
2966 else
2967 epilogue_p = 1;
2969 /* Set the local, input, and output register names. We need to do this
2970 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2971 half. If we use in/loc/out register names, then we get assembler errors
2972 in crtn.S because there is no alloc insn or regstk directive in there. */
2973 if (! TARGET_REG_NAMES)
2975 int inputs = current_frame_info.n_input_regs;
2976 int locals = current_frame_info.n_local_regs;
2977 int outputs = current_frame_info.n_output_regs;
2979 for (i = 0; i < inputs; i++)
2980 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2981 for (i = 0; i < locals; i++)
2982 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2983 for (i = 0; i < outputs; i++)
2984 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2987 /* Set the frame pointer register name. The regnum is logically loc79,
2988 but of course we'll not have allocated that many locals. Rather than
2989 worrying about renumbering the existing rtxs, we adjust the name. */
2990 /* ??? This code means that we can never use one local register when
2991 there is a frame pointer. loc79 gets wasted in this case, as it is
2992 renamed to a register that will never be used. See also the try_locals
2993 code in find_gr_spill. */
2994 if (current_frame_info.reg_fp)
2996 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2997 reg_names[HARD_FRAME_POINTER_REGNUM]
2998 = reg_names[current_frame_info.reg_fp];
2999 reg_names[current_frame_info.reg_fp] = tmp;
3002 /* We don't need an alloc instruction if we've used no outputs or locals. */
3003 if (current_frame_info.n_local_regs == 0
3004 && current_frame_info.n_output_regs == 0
3005 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
3006 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3008 /* If there is no alloc, but there are input registers used, then we
3009 need a .regstk directive. */
3010 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3011 ar_pfs_save_reg = NULL_RTX;
3013 else
3015 current_frame_info.need_regstk = 0;
3017 if (current_frame_info.reg_save_ar_pfs)
3018 regno = current_frame_info.reg_save_ar_pfs;
3019 else
3020 regno = next_scratch_gr_reg ();
3021 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3023 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3024 GEN_INT (current_frame_info.n_input_regs),
3025 GEN_INT (current_frame_info.n_local_regs),
3026 GEN_INT (current_frame_info.n_output_regs),
3027 GEN_INT (current_frame_info.n_rotate_regs)));
3028 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
3031 /* Set up frame pointer, stack pointer, and spill iterators. */
3033 n_varargs = cfun->machine->n_varargs;
3034 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3035 stack_pointer_rtx, 0);
3037 if (frame_pointer_needed)
3039 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3040 RTX_FRAME_RELATED_P (insn) = 1;
3043 if (current_frame_info.total_size != 0)
3045 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3046 rtx offset;
3048 if (CONST_OK_FOR_I (- current_frame_info.total_size))
3049 offset = frame_size_rtx;
3050 else
3052 regno = next_scratch_gr_reg ();
3053 offset = gen_rtx_REG (DImode, regno);
3054 emit_move_insn (offset, frame_size_rtx);
3057 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3058 stack_pointer_rtx, offset));
3060 if (! frame_pointer_needed)
3062 RTX_FRAME_RELATED_P (insn) = 1;
3063 if (GET_CODE (offset) != CONST_INT)
3065 REG_NOTES (insn)
3066 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3067 gen_rtx_SET (VOIDmode,
3068 stack_pointer_rtx,
3069 gen_rtx_PLUS (DImode,
3070 stack_pointer_rtx,
3071 frame_size_rtx)),
3072 REG_NOTES (insn));
3076 /* ??? At this point we must generate a magic insn that appears to
3077 modify the stack pointer, the frame pointer, and all spill
3078 iterators. This would allow the most scheduling freedom. For
3079 now, just hard stop. */
3080 emit_insn (gen_blockage ());
3083 /* Must copy out ar.unat before doing any integer spills. */
3084 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3086 if (current_frame_info.reg_save_ar_unat)
3087 ar_unat_save_reg
3088 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3089 else
3091 alt_regno = next_scratch_gr_reg ();
3092 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3093 current_frame_info.gr_used_mask |= 1 << alt_regno;
3096 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3097 insn = emit_move_insn (ar_unat_save_reg, reg);
3098 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
3100 /* Even if we're not going to generate an epilogue, we still
3101 need to save the register so that EH works. */
3102 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
3103 emit_insn (gen_prologue_use (ar_unat_save_reg));
3105 else
3106 ar_unat_save_reg = NULL_RTX;
3108 /* Spill all varargs registers. Do this before spilling any GR registers,
3109 since we want the UNAT bits for the GR registers to override the UNAT
3110 bits from varargs, which we don't care about. */
3112 cfa_off = -16;
3113 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3115 reg = gen_rtx_REG (DImode, regno);
3116 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3119 /* Locate the bottom of the register save area. */
3120 cfa_off = (current_frame_info.spill_cfa_off
3121 + current_frame_info.spill_size
3122 + current_frame_info.extra_spill_size);
3124 /* Save the predicate register block either in a register or in memory. */
3125 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3127 reg = gen_rtx_REG (DImode, PR_REG (0));
3128 if (current_frame_info.reg_save_pr != 0)
3130 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3131 insn = emit_move_insn (alt_reg, reg);
3133 /* ??? Denote pr spill/fill by a DImode move that modifies all
3134 64 hard registers. */
3135 RTX_FRAME_RELATED_P (insn) = 1;
3136 REG_NOTES (insn)
3137 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3138 gen_rtx_SET (VOIDmode, alt_reg, reg),
3139 REG_NOTES (insn));
3141 /* Even if we're not going to generate an epilogue, we still
3142 need to save the register so that EH works. */
3143 if (! epilogue_p)
3144 emit_insn (gen_prologue_use (alt_reg));
3146 else
3148 alt_regno = next_scratch_gr_reg ();
3149 alt_reg = gen_rtx_REG (DImode, alt_regno);
3150 insn = emit_move_insn (alt_reg, reg);
3151 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3152 cfa_off -= 8;
3156 /* Handle AR regs in numerical order. All of them get special handling. */
3157 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3158 && current_frame_info.reg_save_ar_unat == 0)
3160 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3161 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3162 cfa_off -= 8;
3165 /* The alloc insn already copied ar.pfs into a general register. The
3166 only thing we have to do now is copy that register to a stack slot
3167 if we'd not allocated a local register for the job. */
3168 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3169 && current_frame_info.reg_save_ar_pfs == 0)
3171 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3172 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3173 cfa_off -= 8;
3176 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3178 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3179 if (current_frame_info.reg_save_ar_lc != 0)
3181 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3182 insn = emit_move_insn (alt_reg, reg);
3183 RTX_FRAME_RELATED_P (insn) = 1;
3185 /* Even if we're not going to generate an epilogue, we still
3186 need to save the register so that EH works. */
3187 if (! epilogue_p)
3188 emit_insn (gen_prologue_use (alt_reg));
3190 else
3192 alt_regno = next_scratch_gr_reg ();
3193 alt_reg = gen_rtx_REG (DImode, alt_regno);
3194 emit_move_insn (alt_reg, reg);
3195 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3196 cfa_off -= 8;
3200 /* Save the return pointer. */
3201 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3203 reg = gen_rtx_REG (DImode, BR_REG (0));
3204 if (current_frame_info.reg_save_b0 != 0)
3206 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3207 insn = emit_move_insn (alt_reg, reg);
3208 RTX_FRAME_RELATED_P (insn) = 1;
3210 /* Even if we're not going to generate an epilogue, we still
3211 need to save the register so that EH works. */
3212 if (! epilogue_p)
3213 emit_insn (gen_prologue_use (alt_reg));
3215 else
3217 alt_regno = next_scratch_gr_reg ();
3218 alt_reg = gen_rtx_REG (DImode, alt_regno);
3219 emit_move_insn (alt_reg, reg);
3220 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3221 cfa_off -= 8;
3225 if (current_frame_info.reg_save_gp)
3227 insn = emit_move_insn (gen_rtx_REG (DImode,
3228 current_frame_info.reg_save_gp),
3229 pic_offset_table_rtx);
3230 /* We don't know for sure yet if this is actually needed, since
3231 we've not split the PIC call patterns. If all of the calls
3232 are indirect, and not followed by any uses of the gp, then
3233 this save is dead. Allow it to go away. */
3234 REG_NOTES (insn)
3235 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
3238 /* We should now be at the base of the gr/br/fr spill area. */
3239 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3240 + current_frame_info.spill_size));
3242 /* Spill all general registers. */
3243 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3244 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3246 reg = gen_rtx_REG (DImode, regno);
3247 do_spill (gen_gr_spill, reg, cfa_off, reg);
3248 cfa_off -= 8;
3251 /* Spill the rest of the BR registers. */
3252 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3253 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3255 alt_regno = next_scratch_gr_reg ();
3256 alt_reg = gen_rtx_REG (DImode, alt_regno);
3257 reg = gen_rtx_REG (DImode, regno);
3258 emit_move_insn (alt_reg, reg);
3259 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3260 cfa_off -= 8;
3263 /* Align the frame and spill all FR registers. */
3264 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3265 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3267 gcc_assert (!(cfa_off & 15));
3268 reg = gen_rtx_REG (XFmode, regno);
3269 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3270 cfa_off -= 16;
3273 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3275 finish_spill_pointers ();
3278 /* Called after register allocation to add any instructions needed for the
3279 epilogue. Using an epilogue insn is favored compared to putting all of the
3280 instructions in output_function_epilogue(), since it allows the scheduler
3281 to intermix instructions with the restores of the call-saved registers. In
3282 some cases, it might be necessary to emit a barrier instruction as the last
3283 insn to prevent such scheduling. */
3285 void
3286 ia64_expand_epilogue (int sibcall_p)
3288 rtx insn, reg, alt_reg, ar_unat_save_reg;
3289 int regno, alt_regno, cfa_off;
3291 ia64_compute_frame_size (get_frame_size ());
3293 /* If there is a frame pointer, then we use it instead of the stack
3294 pointer, so that the stack pointer does not need to be valid when
3295 the epilogue starts. See EXIT_IGNORE_STACK. */
3296 if (frame_pointer_needed)
3297 setup_spill_pointers (current_frame_info.n_spilled,
3298 hard_frame_pointer_rtx, 0);
3299 else
3300 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3301 current_frame_info.total_size);
3303 if (current_frame_info.total_size != 0)
3305 /* ??? At this point we must generate a magic insn that appears to
3306 modify the spill iterators and the frame pointer. This would
3307 allow the most scheduling freedom. For now, just hard stop. */
3308 emit_insn (gen_blockage ());
3311 /* Locate the bottom of the register save area. */
3312 cfa_off = (current_frame_info.spill_cfa_off
3313 + current_frame_info.spill_size
3314 + current_frame_info.extra_spill_size);
3316 /* Restore the predicate registers. */
3317 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3319 if (current_frame_info.reg_save_pr != 0)
3320 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3321 else
3323 alt_regno = next_scratch_gr_reg ();
3324 alt_reg = gen_rtx_REG (DImode, alt_regno);
3325 do_restore (gen_movdi_x, alt_reg, cfa_off);
3326 cfa_off -= 8;
3328 reg = gen_rtx_REG (DImode, PR_REG (0));
3329 emit_move_insn (reg, alt_reg);
3332 /* Restore the application registers. */
3334 /* Load the saved unat from the stack, but do not restore it until
3335 after the GRs have been restored. */
3336 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3338 if (current_frame_info.reg_save_ar_unat != 0)
3339 ar_unat_save_reg
3340 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3341 else
3343 alt_regno = next_scratch_gr_reg ();
3344 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3345 current_frame_info.gr_used_mask |= 1 << alt_regno;
3346 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3347 cfa_off -= 8;
3350 else
3351 ar_unat_save_reg = NULL_RTX;
3353 if (current_frame_info.reg_save_ar_pfs != 0)
3355 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3356 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3357 emit_move_insn (reg, alt_reg);
3359 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3361 alt_regno = next_scratch_gr_reg ();
3362 alt_reg = gen_rtx_REG (DImode, alt_regno);
3363 do_restore (gen_movdi_x, alt_reg, cfa_off);
3364 cfa_off -= 8;
3365 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3366 emit_move_insn (reg, alt_reg);
3369 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3371 if (current_frame_info.reg_save_ar_lc != 0)
3372 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3373 else
3375 alt_regno = next_scratch_gr_reg ();
3376 alt_reg = gen_rtx_REG (DImode, alt_regno);
3377 do_restore (gen_movdi_x, alt_reg, cfa_off);
3378 cfa_off -= 8;
3380 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3381 emit_move_insn (reg, alt_reg);
3384 /* Restore the return pointer. */
3385 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3387 if (current_frame_info.reg_save_b0 != 0)
3388 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3389 else
3391 alt_regno = next_scratch_gr_reg ();
3392 alt_reg = gen_rtx_REG (DImode, alt_regno);
3393 do_restore (gen_movdi_x, alt_reg, cfa_off);
3394 cfa_off -= 8;
3396 reg = gen_rtx_REG (DImode, BR_REG (0));
3397 emit_move_insn (reg, alt_reg);
3400 /* We should now be at the base of the gr/br/fr spill area. */
3401 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3402 + current_frame_info.spill_size));
3404 /* The GP may be stored on the stack in the prologue, but it's
3405 never restored in the epilogue. Skip the stack slot. */
3406 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3407 cfa_off -= 8;
3409 /* Restore all general registers. */
3410 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3411 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3413 reg = gen_rtx_REG (DImode, regno);
3414 do_restore (gen_gr_restore, reg, cfa_off);
3415 cfa_off -= 8;
3418 /* Restore the branch registers. */
3419 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3420 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3422 alt_regno = next_scratch_gr_reg ();
3423 alt_reg = gen_rtx_REG (DImode, alt_regno);
3424 do_restore (gen_movdi_x, alt_reg, cfa_off);
3425 cfa_off -= 8;
3426 reg = gen_rtx_REG (DImode, regno);
3427 emit_move_insn (reg, alt_reg);
3430 /* Restore floating point registers. */
3431 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3432 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3434 gcc_assert (!(cfa_off & 15));
3435 reg = gen_rtx_REG (XFmode, regno);
3436 do_restore (gen_fr_restore_x, reg, cfa_off);
3437 cfa_off -= 16;
3440 /* Restore ar.unat for real. */
3441 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3443 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3444 emit_move_insn (reg, ar_unat_save_reg);
3447 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3449 finish_spill_pointers ();
3451 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3453 /* ??? At this point we must generate a magic insn that appears to
3454 modify the spill iterators, the stack pointer, and the frame
3455 pointer. This would allow the most scheduling freedom. For now,
3456 just hard stop. */
3457 emit_insn (gen_blockage ());
3460 if (cfun->machine->ia64_eh_epilogue_sp)
3461 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3462 else if (frame_pointer_needed)
3464 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3465 RTX_FRAME_RELATED_P (insn) = 1;
3467 else if (current_frame_info.total_size)
3469 rtx offset, frame_size_rtx;
3471 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3472 if (CONST_OK_FOR_I (current_frame_info.total_size))
3473 offset = frame_size_rtx;
3474 else
3476 regno = next_scratch_gr_reg ();
3477 offset = gen_rtx_REG (DImode, regno);
3478 emit_move_insn (offset, frame_size_rtx);
3481 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3482 offset));
3484 RTX_FRAME_RELATED_P (insn) = 1;
3485 if (GET_CODE (offset) != CONST_INT)
3487 REG_NOTES (insn)
3488 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3489 gen_rtx_SET (VOIDmode,
3490 stack_pointer_rtx,
3491 gen_rtx_PLUS (DImode,
3492 stack_pointer_rtx,
3493 frame_size_rtx)),
3494 REG_NOTES (insn));
3498 if (cfun->machine->ia64_eh_epilogue_bsp)
3499 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3501 if (! sibcall_p)
3502 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3503 else
3505 int fp = GR_REG (2);
3506 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
3507 first available call-clobbered register. If there was a frame pointer
3508 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3509 so we have to make sure we're using the string "r2" when emitting
3510 the register name for the assembler. */
3511 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3512 fp = HARD_FRAME_POINTER_REGNUM;
3514 /* We must emit an alloc to force the input registers to become output
3515 registers. Otherwise, if the callee tries to pass its parameters
3516 through to another call without an intervening alloc, then these
3517 values get lost. */
3518 /* ??? We don't need to preserve all input registers. We only need to
3519 preserve those input registers used as arguments to the sibling call.
3520 It is unclear how to compute that number here. */
3521 if (current_frame_info.n_input_regs != 0)
3523 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3524 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3525 const0_rtx, const0_rtx,
3526 n_inputs, const0_rtx));
3527 RTX_FRAME_RELATED_P (insn) = 1;
3532 /* Return 1 if br.ret can do all the work required to return from a
3533 function. */
3536 ia64_direct_return (void)
3538 if (reload_completed && ! frame_pointer_needed)
3540 ia64_compute_frame_size (get_frame_size ());
3542 return (current_frame_info.total_size == 0
3543 && current_frame_info.n_spilled == 0
3544 && current_frame_info.reg_save_b0 == 0
3545 && current_frame_info.reg_save_pr == 0
3546 && current_frame_info.reg_save_ar_pfs == 0
3547 && current_frame_info.reg_save_ar_unat == 0
3548 && current_frame_info.reg_save_ar_lc == 0);
3550 return 0;
3553 /* Return the magic cookie that we use to hold the return address
3554 during early compilation. */
3557 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3559 if (count != 0)
3560 return NULL;
3561 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3564 /* Split this value after reload, now that we know where the return
3565 address is saved. */
3567 void
3568 ia64_split_return_addr_rtx (rtx dest)
3570 rtx src;
3572 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3574 if (current_frame_info.reg_save_b0 != 0)
3575 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3576 else
3578 HOST_WIDE_INT off;
3579 unsigned int regno;
3581 /* Compute offset from CFA for BR0. */
3582 /* ??? Must be kept in sync with ia64_expand_prologue. */
3583 off = (current_frame_info.spill_cfa_off
3584 + current_frame_info.spill_size);
3585 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3586 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3587 off -= 8;
3589 /* Convert CFA offset to a register based offset. */
3590 if (frame_pointer_needed)
3591 src = hard_frame_pointer_rtx;
3592 else
3594 src = stack_pointer_rtx;
3595 off += current_frame_info.total_size;
3598 /* Load address into scratch register. */
3599 if (CONST_OK_FOR_I (off))
3600 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3601 else
3603 emit_move_insn (dest, GEN_INT (off));
3604 emit_insn (gen_adddi3 (dest, src, dest));
3607 src = gen_rtx_MEM (Pmode, dest);
3610 else
3611 src = gen_rtx_REG (DImode, BR_REG (0));
3613 emit_move_insn (dest, src);
3617 ia64_hard_regno_rename_ok (int from, int to)
3619 /* Don't clobber any of the registers we reserved for the prologue. */
3620 if (to == current_frame_info.reg_fp
3621 || to == current_frame_info.reg_save_b0
3622 || to == current_frame_info.reg_save_pr
3623 || to == current_frame_info.reg_save_ar_pfs
3624 || to == current_frame_info.reg_save_ar_unat
3625 || to == current_frame_info.reg_save_ar_lc)
3626 return 0;
3628 if (from == current_frame_info.reg_fp
3629 || from == current_frame_info.reg_save_b0
3630 || from == current_frame_info.reg_save_pr
3631 || from == current_frame_info.reg_save_ar_pfs
3632 || from == current_frame_info.reg_save_ar_unat
3633 || from == current_frame_info.reg_save_ar_lc)
3634 return 0;
3636 /* Don't use output registers outside the register frame. */
3637 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3638 return 0;
3640 /* Retain even/oddness on predicate register pairs. */
3641 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3642 return (from & 1) == (to & 1);
3644 return 1;
3647 /* Target hook for assembling integer objects. Handle word-sized
3648 aligned objects and detect the cases when @fptr is needed. */
3650 static bool
3651 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3653 if (size == POINTER_SIZE / BITS_PER_UNIT
3654 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3655 && GET_CODE (x) == SYMBOL_REF
3656 && SYMBOL_REF_FUNCTION_P (x))
3658 static const char * const directive[2][2] = {
3659 /* 64-bit pointer */ /* 32-bit pointer */
3660 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3661 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3663 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3664 output_addr_const (asm_out_file, x);
3665 fputs (")\n", asm_out_file);
3666 return true;
3668 return default_assemble_integer (x, size, aligned_p);
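/* As an example of the @fptr case above, an aligned LP64 pointer to a
   (hypothetical) function foo is emitted as

     data8	@fptr(foo)

   with data8.ua, data4 and data4.ua substituted for the unaligned and
   ILP32 variants per the table.  */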
3671 /* Emit the function prologue. */
3673 static void
3674 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3676 int mask, grsave, grsave_prev;
3678 if (current_frame_info.need_regstk)
3679 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3680 current_frame_info.n_input_regs,
3681 current_frame_info.n_local_regs,
3682 current_frame_info.n_output_regs,
3683 current_frame_info.n_rotate_regs);
3685 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3686 return;
3688 /* Emit the .prologue directive. */
3690 mask = 0;
3691 grsave = grsave_prev = 0;
3692 if (current_frame_info.reg_save_b0 != 0)
3694 mask |= 8;
3695 grsave = grsave_prev = current_frame_info.reg_save_b0;
3697 if (current_frame_info.reg_save_ar_pfs != 0
3698 && (grsave_prev == 0
3699 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3701 mask |= 4;
3702 if (grsave_prev == 0)
3703 grsave = current_frame_info.reg_save_ar_pfs;
3704 grsave_prev = current_frame_info.reg_save_ar_pfs;
3706 if (current_frame_info.reg_fp != 0
3707 && (grsave_prev == 0
3708 || current_frame_info.reg_fp == grsave_prev + 1))
3710 mask |= 2;
3711 if (grsave_prev == 0)
3712 grsave = HARD_FRAME_POINTER_REGNUM;
3713 grsave_prev = current_frame_info.reg_fp;
3715 if (current_frame_info.reg_save_pr != 0
3716 && (grsave_prev == 0
3717 || current_frame_info.reg_save_pr == grsave_prev + 1))
3719 mask |= 1;
3720 if (grsave_prev == 0)
3721 grsave = current_frame_info.reg_save_pr;
3724 if (mask && TARGET_GNU_AS)
3725 fprintf (file, "\t.prologue %d, %d\n", mask,
3726 ia64_dbx_register_number (grsave));
3727 else
3728 fputs ("\t.prologue\n", file);
3730 /* Emit a .spill directive, if necessary, to relocate the base of
3731 the register spill area. */
3732 if (current_frame_info.spill_cfa_off != -16)
3733 fprintf (file, "\t.spill %ld\n",
3734 (long) (current_frame_info.spill_cfa_off
3735 + current_frame_info.spill_size));
3738 /* Emit the .body directive at the scheduled end of the prologue. */
3740 static void
3741 ia64_output_function_end_prologue (FILE *file)
3743 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3744 return;
3746 fputs ("\t.body\n", file);
3749 /* Emit the function epilogue. */
3751 static void
3752 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3753 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3755 int i;
3757 if (current_frame_info.reg_fp)
3759 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3760 reg_names[HARD_FRAME_POINTER_REGNUM]
3761 = reg_names[current_frame_info.reg_fp];
3762 reg_names[current_frame_info.reg_fp] = tmp;
3764 if (! TARGET_REG_NAMES)
3766 for (i = 0; i < current_frame_info.n_input_regs; i++)
3767 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3768 for (i = 0; i < current_frame_info.n_local_regs; i++)
3769 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3770 for (i = 0; i < current_frame_info.n_output_regs; i++)
3771 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3774 current_frame_info.initialized = 0;
3778 ia64_dbx_register_number (int regno)
3780 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3781 from its home at loc79 to something inside the register frame. We
3782 must perform the same renumbering here for the debug info. */
3783 if (current_frame_info.reg_fp)
3785 if (regno == HARD_FRAME_POINTER_REGNUM)
3786 regno = current_frame_info.reg_fp;
3787 else if (regno == current_frame_info.reg_fp)
3788 regno = HARD_FRAME_POINTER_REGNUM;
3791 if (IN_REGNO_P (regno))
3792 return 32 + regno - IN_REG (0);
3793 else if (LOC_REGNO_P (regno))
3794 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3795 else if (OUT_REGNO_P (regno))
3796 return (32 + current_frame_info.n_input_regs
3797 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3798 else
3799 return regno;
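/* For example, the first input register in0 maps to 32; with two input
   registers, the first local loc0 maps to 34.  Stacked output registers
   follow the locals, and other registers keep their hard register
   numbers.  */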
3802 void
3803 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3805 rtx addr_reg, eight = GEN_INT (8);
3807 /* The Intel assembler requires that the global __ia64_trampoline symbol
3808 be declared explicitly. */
3809 if (!TARGET_GNU_AS)
3811 static bool declared_ia64_trampoline = false;
3813 if (!declared_ia64_trampoline)
3815 declared_ia64_trampoline = true;
3816 (*targetm.asm_out.globalize_label) (asm_out_file,
3817 "__ia64_trampoline");
3821 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3822 addr = convert_memory_address (Pmode, addr);
3823 fnaddr = convert_memory_address (Pmode, fnaddr);
3824 static_chain = convert_memory_address (Pmode, static_chain);
3826 /* Load up our iterator. */
3827 addr_reg = gen_reg_rtx (Pmode);
3828 emit_move_insn (addr_reg, addr);
3830 /* The first two words are the fake descriptor:
3831 __ia64_trampoline, ADDR+16. */
3832 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3833 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3834 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3836 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3837 copy_to_reg (plus_constant (addr, 16)));
3838 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3840 /* The third word is the target descriptor. */
3841 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3842 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3844 /* The fourth word is the static chain. */
3845 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
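/* The stores above lay the 32-byte trampoline out roughly as:
     [ADDR+ 0]  __ia64_trampoline    <- fake descriptor, entry point
     [ADDR+ 8]  ADDR + 16            <- fake descriptor, "gp"
     [ADDR+16]  FNADDR               <- target function descriptor
     [ADDR+24]  STATIC_CHAIN
   so a call through the fake descriptor enters __ia64_trampoline with
   gp pointing at the real target descriptor and static chain.  */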
3848 /* Do any needed setup for a variadic function. CUM has not been updated
3849 for the last named argument which has type TYPE and mode MODE.
3851 We generate the actual spill instructions during prologue generation. */
3853 static void
3854 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3855 tree type, int * pretend_size,
3856 int second_time ATTRIBUTE_UNUSED)
3858 CUMULATIVE_ARGS next_cum = *cum;
3860 /* Skip the current argument. */
3861 ia64_function_arg_advance (&next_cum, mode, type, 1);
3863 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3865 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3866 *pretend_size = n * UNITS_PER_WORD;
3867 cfun->machine->n_varargs = n;
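/* E.g., for "int f (const char *fmt, ...)" the one named argument uses a
   single slot, so n is 7 and 56 bytes (7 * UNITS_PER_WORD) of pretend
   arguments are reserved for the prologue to spill.  */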
3871 /* Check whether TYPE is a homogeneous floating point aggregate. If
3872 it is, return the mode of the floating point type that appears
3873 in all leaves. If it is not, return VOIDmode.
3875 An aggregate is a homogeneous floating point aggregate if all
3876 fields/elements in it have the same floating point type (e.g.,
3877 SFmode). 128-bit quad-precision floats are excluded.
3879 Variable sized aggregates should never arrive here, since we should
3880 have already decided to pass them by reference. Top-level zero-sized
3881 aggregates are excluded because our parallels crash the middle-end. */
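/* For instance, "struct { float x, y, z; }" is an HFA whose element mode
   is SFmode, whereas "struct { float x; double y; }" is not an HFA
   because its leaves have different floating point modes.  */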
3883 static enum machine_mode
3884 hfa_element_mode (tree type, bool nested)
3886 enum machine_mode element_mode = VOIDmode;
3887 enum machine_mode mode;
3888 enum tree_code code = TREE_CODE (type);
3889 int know_element_mode = 0;
3890 tree t;
3892 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3893 return VOIDmode;
3895 switch (code)
3897 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3898 case BOOLEAN_TYPE: case POINTER_TYPE:
3899 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3900 case LANG_TYPE: case FUNCTION_TYPE:
3901 return VOIDmode;
3903 /* Fortran complex types are supposed to be HFAs, so we need to handle
3904 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3905 types though. */
3906 case COMPLEX_TYPE:
3907 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3908 && TYPE_MODE (type) != TCmode)
3909 return GET_MODE_INNER (TYPE_MODE (type));
3910 else
3911 return VOIDmode;
3913 case REAL_TYPE:
3914 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3915 mode if this is contained within an aggregate. */
3916 if (nested && TYPE_MODE (type) != TFmode)
3917 return TYPE_MODE (type);
3918 else
3919 return VOIDmode;
3921 case ARRAY_TYPE:
3922 return hfa_element_mode (TREE_TYPE (type), 1);
3924 case RECORD_TYPE:
3925 case UNION_TYPE:
3926 case QUAL_UNION_TYPE:
3927 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3929 if (TREE_CODE (t) != FIELD_DECL)
3930 continue;
3932 mode = hfa_element_mode (TREE_TYPE (t), 1);
3933 if (know_element_mode)
3935 if (mode != element_mode)
3936 return VOIDmode;
3938 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3939 return VOIDmode;
3940 else
3942 know_element_mode = 1;
3943 element_mode = mode;
3946 return element_mode;
3948 default:
3949 /* If we reach here, we probably have some front-end specific type
3950 that the backend doesn't know about. This can happen via the
3951 aggregate_value_p call in init_function_start. All we can do is
3952 ignore unknown tree types. */
3953 return VOIDmode;
3956 return VOIDmode;
3959 /* Return the number of words required to hold a quantity of TYPE and MODE
3960 when passed as an argument. */
3961 static int
3962 ia64_function_arg_words (tree type, enum machine_mode mode)
3964 int words;
3966 if (mode == BLKmode)
3967 words = int_size_in_bytes (type);
3968 else
3969 words = GET_MODE_SIZE (mode);
3971 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
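/* E.g., a 12-byte BLKmode aggregate needs (12 + 7) / 8 == 2 argument
   words, UNITS_PER_WORD being 8 here.  */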
3974 /* Return the number of registers that should be skipped so the current
3975 argument (described by TYPE and WORDS) will be properly aligned.
3977 Integer and float arguments larger than 8 bytes start at the next
3978 even boundary. Aggregates larger than 8 bytes start at the next
3979 even boundary if the aggregate has 16 byte alignment. Note that
3980 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3981 but are still to be aligned in registers.
3983 ??? The ABI does not specify how to handle aggregates with
3984 alignment from 9 to 15 bytes, or greater than 16. We handle them
3985 all as if they had 16 byte alignment. Such aggregates can occur
3986 only if gcc extensions are used. */
3987 static int
3988 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3990 if ((cum->words & 1) == 0)
3991 return 0;
3993 if (type
3994 && TREE_CODE (type) != INTEGER_TYPE
3995 && TREE_CODE (type) != REAL_TYPE)
3996 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3997 else
3998 return words > 1;
4001 /* Return rtx for register where argument is passed, or zero if it is passed
4002 on the stack. */
4003 /* ??? 128-bit quad-precision floats are always passed in general
4004 registers. */
4007 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
4008 int named, int incoming)
4010 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4011 int words = ia64_function_arg_words (type, mode);
4012 int offset = ia64_function_arg_offset (cum, type, words);
4013 enum machine_mode hfa_mode = VOIDmode;
4015 /* If all argument slots are used, then it must go on the stack. */
4016 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4017 return 0;
4019 /* Check for and handle homogeneous FP aggregates. */
4020 if (type)
4021 hfa_mode = hfa_element_mode (type, 0);
4023 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4024 and unprototyped hfas are passed specially. */
4025 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4027 rtx loc[16];
4028 int i = 0;
4029 int fp_regs = cum->fp_regs;
4030 int int_regs = cum->words + offset;
4031 int hfa_size = GET_MODE_SIZE (hfa_mode);
4032 int byte_size;
4033 int args_byte_size;
4035 /* If prototyped, pass it in FR regs then GR regs.
4036 If not prototyped, pass it in both FR and GR regs.
4038 If this is an SFmode aggregate, then it is possible to run out of
4039 FR regs while GR regs are still left. In that case, we pass the
4040 remaining part in the GR regs. */
4042 /* Fill the FP regs. We do this always. We stop if we reach the end
4043 of the argument, the last FP register, or the last argument slot. */
4045 byte_size = ((mode == BLKmode)
4046 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4047 args_byte_size = int_regs * UNITS_PER_WORD;
4048 offset = 0;
4049 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4050 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4052 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4053 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4054 + fp_regs)),
4055 GEN_INT (offset));
4056 offset += hfa_size;
4057 args_byte_size += hfa_size;
4058 fp_regs++;
4061 /* If no prototype, then the whole thing must go in GR regs. */
4062 if (! cum->prototype)
4063 offset = 0;
4064 /* If this is an SFmode aggregate, then we might have some left over
4065 that needs to go in GR regs. */
4066 else if (byte_size != offset)
4067 int_regs += offset / UNITS_PER_WORD;
4069 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4071 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4073 enum machine_mode gr_mode = DImode;
4074 unsigned int gr_size;
4076 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4077 then this goes in a GR reg left adjusted/little endian, right
4078 adjusted/big endian. */
4079 /* ??? Currently this is handled wrong, because 4-byte hunks are
4080 always right adjusted/little endian. */
4081 if (offset & 0x4)
4082 gr_mode = SImode;
4083 /* If we have an even 4 byte hunk because the aggregate is a
4084 multiple of 4 bytes in size, then this goes in a GR reg right
4085 adjusted/little endian. */
4086 else if (byte_size - offset == 4)
4087 gr_mode = SImode;
4089 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4090 gen_rtx_REG (gr_mode, (basereg
4091 + int_regs)),
4092 GEN_INT (offset));
4094 gr_size = GET_MODE_SIZE (gr_mode);
4095 offset += gr_size;
4096 if (gr_size == UNITS_PER_WORD
4097 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4098 int_regs++;
4099 else if (gr_size > UNITS_PER_WORD)
4100 int_regs += gr_size / UNITS_PER_WORD;
4102 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4105 /* Integral and aggregates go in general registers. If we have run out of
4106 FR registers, then FP values must also go in general registers. This can
4107 happen when we have an SFmode HFA. */
4108 else if (mode == TFmode || mode == TCmode
4109 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4111 int byte_size = ((mode == BLKmode)
4112 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4113 if (BYTES_BIG_ENDIAN
4114 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4115 && byte_size < UNITS_PER_WORD
4116 && byte_size > 0)
4118 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4119 gen_rtx_REG (DImode,
4120 (basereg + cum->words
4121 + offset)),
4122 const0_rtx);
4123 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4125 else
4126 return gen_rtx_REG (mode, basereg + cum->words + offset);
4130 /* If there is a prototype, then FP values go in a FR register when
4131 named, and in a GR register when unnamed. */
4132 else if (cum->prototype)
4134 if (named)
4135 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4136 /* In big-endian mode, an anonymous SFmode value must be represented
4137 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4138 the value into the high half of the general register. */
4139 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4140 return gen_rtx_PARALLEL (mode,
4141 gen_rtvec (1,
4142 gen_rtx_EXPR_LIST (VOIDmode,
4143 gen_rtx_REG (DImode, basereg + cum->words + offset),
4144 const0_rtx)));
4145 else
4146 return gen_rtx_REG (mode, basereg + cum->words + offset);
4148 /* If there is no prototype, then FP values go in both FR and GR
4149 registers. */
4150 else
4152 /* See comment above. */
4153 enum machine_mode inner_mode =
4154 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4156 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4157 gen_rtx_REG (mode, (FR_ARG_FIRST
4158 + cum->fp_regs)),
4159 const0_rtx);
4160 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4161 gen_rtx_REG (inner_mode,
4162 (basereg + cum->words
4163 + offset)),
4164 const0_rtx);
4166 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
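/* As an illustration, a named, prototyped "struct { float f[4]; }"
   passed with all argument registers free comes back as a PARALLEL of
   four SFmode pieces in f8-f11 at byte offsets 0, 4, 8 and 12; only if
   the FP argument registers were exhausted would the tail spill into
   general registers as described above.  */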
4170 /* Return number of bytes, at the beginning of the argument, that must be
4171 put in registers. 0 if the argument is entirely in registers or entirely
4172 in memory. */
4174 static int
4175 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4176 tree type, bool named ATTRIBUTE_UNUSED)
4178 int words = ia64_function_arg_words (type, mode);
4179 int offset = ia64_function_arg_offset (cum, type, words);
4181 /* If all argument slots are used, then it must go on the stack. */
4182 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4183 return 0;
4185 /* It doesn't matter whether the argument goes in FR or GR regs. If
4186 it fits within the 8 argument slots, then it goes entirely in
4187 registers. If it extends past the last argument slot, then the rest
4188 goes on the stack. */
4190 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4191 return 0;
4193 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
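/* E.g., with six slots already used, a four-word aggregate gets its
   first two words in registers, so this returns 2 * UNITS_PER_WORD
   == 16 and the remaining two words go on the stack.  */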
4196 /* Update CUM to point after this argument. This is patterned after
4197 ia64_function_arg. */
4199 void
4200 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4201 tree type, int named)
4203 int words = ia64_function_arg_words (type, mode);
4204 int offset = ia64_function_arg_offset (cum, type, words);
4205 enum machine_mode hfa_mode = VOIDmode;
4207 /* If all arg slots are already full, then there is nothing to do. */
4208 if (cum->words >= MAX_ARGUMENT_SLOTS)
4209 return;
4211 cum->words += words + offset;
4213 /* Check for and handle homogeneous FP aggregates. */
4214 if (type)
4215 hfa_mode = hfa_element_mode (type, 0);
4217 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4218 and unprototyped hfas are passed specially. */
4219 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4221 int fp_regs = cum->fp_regs;
4222 /* This is the original value of cum->words + offset. */
4223 int int_regs = cum->words - words;
4224 int hfa_size = GET_MODE_SIZE (hfa_mode);
4225 int byte_size;
4226 int args_byte_size;
4228 /* If prototyped, pass it in FR regs then GR regs.
4229 If not prototyped, pass it in both FR and GR regs.
4231 If this is an SFmode aggregate, then it is possible to run out of
4232 FR regs while GR regs are still left. In that case, we pass the
4233 remaining part in the GR regs. */
4235 /* Fill the FP regs. We do this always. We stop if we reach the end
4236 of the argument, the last FP register, or the last argument slot. */
4238 byte_size = ((mode == BLKmode)
4239 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4240 args_byte_size = int_regs * UNITS_PER_WORD;
4241 offset = 0;
4242 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4243 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4245 offset += hfa_size;
4246 args_byte_size += hfa_size;
4247 fp_regs++;
4250 cum->fp_regs = fp_regs;
4253 /* Integral and aggregates go in general registers. So do TFmode FP values.
4254 If we have run out of FR registers, then other FP values must also go in
4255 general registers. This can happen when we have an SFmode HFA. */
4256 else if (mode == TFmode || mode == TCmode
4257 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4258 cum->int_regs = cum->words;
4260 /* If there is a prototype, then FP values go in a FR register when
4261 named, and in a GR register when unnamed. */
4262 else if (cum->prototype)
4264 if (! named)
4265 cum->int_regs = cum->words;
4266 else
4267 /* ??? Complex types should not reach here. */
4268 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4270 /* If there is no prototype, then FP values go in both FR and GR
4271 registers. */
4272 else
4274 /* ??? Complex types should not reach here. */
4275 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4276 cum->int_regs = cum->words;
4280 /* Arguments with alignment larger than 8 bytes start at the next even
4281 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4282 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4285 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4288 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4289 return PARM_BOUNDARY * 2;
4291 if (type)
4293 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4294 return PARM_BOUNDARY * 2;
4295 else
4296 return PARM_BOUNDARY;
4299 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4300 return PARM_BOUNDARY * 2;
4301 else
4302 return PARM_BOUNDARY;
4305 /* True if it is OK to do sibling call optimization for the specified
4306 call expression EXP. DECL will be the called function, or NULL if
4307 this is an indirect call. */
4308 static bool
4309 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4311 /* We can't perform a sibcall if the current function has the syscall_linkage
4312 attribute. */
4313 if (lookup_attribute ("syscall_linkage",
4314 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4315 return false;
4317 /* We must always return with our current GP. This means we can
4318 only sibcall to functions defined in the current module. */
4319 return decl && (*targetm.binds_local_p) (decl);
4323 /* Implement va_arg. */
4325 static tree
4326 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4328 /* Variable sized types are passed by reference. */
4329 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4331 tree ptrtype = build_pointer_type (type);
4332 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4333 return build_va_arg_indirect_ref (addr);
4336 /* Aggregate arguments with alignment larger than 8 bytes start at
4337 the next even boundary. Integer and floating point arguments
4338 do so if they are larger than 8 bytes, whether or not they are
4339 also aligned larger than 8 bytes. */
4340 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4341 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4343 tree t = build2 (PLUS_EXPR, TREE_TYPE (valist), valist,
4344 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
4345 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4346 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
4347 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist, t);
4348 gimplify_and_add (t, pre_p);
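/* The statements above amount to
     valist = (valist + 2 * UNITS_PER_WORD - 1) & -(2 * UNITS_PER_WORD);
   i.e. rounding the va_list pointer up to the next 16-byte boundary
   before the standard handling takes over.  */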
4351 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4354 /* Return 1 if the function return value is returned in memory. Return 0 if it is
4355 in a register. */
4357 static bool
4358 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
4360 enum machine_mode mode;
4361 enum machine_mode hfa_mode;
4362 HOST_WIDE_INT byte_size;
4364 mode = TYPE_MODE (valtype);
4365 byte_size = GET_MODE_SIZE (mode);
4366 if (mode == BLKmode)
4368 byte_size = int_size_in_bytes (valtype);
4369 if (byte_size < 0)
4370 return true;
4373 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4375 hfa_mode = hfa_element_mode (valtype, 0);
4376 if (hfa_mode != VOIDmode)
4378 int hfa_size = GET_MODE_SIZE (hfa_mode);
4380 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4381 return true;
4382 else
4383 return false;
4385 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4386 return true;
4387 else
4388 return false;
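/* E.g., an HFA of eight doubles (64 bytes) still comes back in the FP
   argument registers, while one of nine doubles, or any non-HFA larger
   than UNITS_PER_WORD * MAX_INT_RETURN_SLOTS bytes, is returned in
   memory.  */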
4391 /* Return rtx for register that holds the function return value. */
4394 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4396 enum machine_mode mode;
4397 enum machine_mode hfa_mode;
4399 mode = TYPE_MODE (valtype);
4400 hfa_mode = hfa_element_mode (valtype, 0);
4402 if (hfa_mode != VOIDmode)
4404 rtx loc[8];
4405 int i;
4406 int hfa_size;
4407 int byte_size;
4408 int offset;
4410 hfa_size = GET_MODE_SIZE (hfa_mode);
4411 byte_size = ((mode == BLKmode)
4412 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4413 offset = 0;
4414 for (i = 0; offset < byte_size; i++)
4416 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4417 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4418 GEN_INT (offset));
4419 offset += hfa_size;
4421 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4423 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4424 return gen_rtx_REG (mode, FR_ARG_FIRST);
4425 else
4427 bool need_parallel = false;
4429 /* In big-endian mode, we need to manage the layout of aggregates
4430 in the registers so that we get the bits properly aligned in
4431 the highpart of the registers. */
4432 if (BYTES_BIG_ENDIAN
4433 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4434 need_parallel = true;
4436 /* Something like struct S { long double x; char a[0] } is not an
4437 HFA structure, and therefore doesn't go in fp registers. But
4438 the middle-end will give it XFmode anyway, and XFmode values
4439 don't normally fit in integer registers. So we need to smuggle
4440 the value inside a parallel. */
4441 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4442 need_parallel = true;
4444 if (need_parallel)
4446 rtx loc[8];
4447 int offset;
4448 int bytesize;
4449 int i;
4451 offset = 0;
4452 bytesize = int_size_in_bytes (valtype);
4453 /* An empty PARALLEL is invalid here, but the return value
4454 doesn't matter for empty structs. */
4455 if (bytesize == 0)
4456 return gen_rtx_REG (mode, GR_RET_FIRST);
4457 for (i = 0; offset < bytesize; i++)
4459 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4460 gen_rtx_REG (DImode,
4461 GR_RET_FIRST + i),
4462 GEN_INT (offset));
4463 offset += UNITS_PER_WORD;
4465 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4468 return gen_rtx_REG (mode, GR_RET_FIRST);
4472 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4473 We need to emit DTP-relative relocations. */
4475 static void
4476 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4478 gcc_assert (size == 4 || size == 8);
4479 if (size == 4)
4480 fputs ("\tdata4.ua\t@dtprel(", file);
4481 else
4482 fputs ("\tdata8.ua\t@dtprel(", file);
4483 output_addr_const (file, x);
4484 fputs (")", file);
4487 /* Print a memory address as an operand to reference that memory location. */
4489 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4490 also call this from ia64_print_operand for memory addresses. */
4492 void
4493 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4494 rtx address ATTRIBUTE_UNUSED)
4498 /* Print an operand to an assembler instruction.
4499 C Swap and print a comparison operator.
4500 D Print an FP comparison operator.
4501 E Print 32 - constant, for SImode shifts as extract.
4502 e Print 64 - constant, for DImode rotates.
4503 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4504 a floating point register emitted normally.
4505 I Invert a predicate register by adding 1.
4506 J Select the proper predicate register for a condition.
4507 j Select the inverse predicate register for a condition.
4508 O Append .acq for volatile load.
4509 P Postincrement of a MEM.
4510 Q Append .rel for volatile store.
4511 S Shift amount for shladd instruction.
4512 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4513 for Intel assembler.
4514 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4515 for Intel assembler.
4516 X A pair of floating point registers.
4517 r Print register name, or constant 0 as r0. HP compatibility for
4518 Linux kernel.
4519 v Print vector constant value as an 8-byte integer value. */
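/* A few examples: %P on a post-increment DImode memory operand prints
   ", 8"; %S on the constant 4 prints "2" (the shladd count); and %,
   prints "(p6) " when the current insn is predicated on p6.  */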
4521 void
4522 ia64_print_operand (FILE * file, rtx x, int code)
4524 const char *str;
4526 switch (code)
4528 case 0:
4529 /* Handled below. */
4530 break;
4532 case 'C':
4534 enum rtx_code c = swap_condition (GET_CODE (x));
4535 fputs (GET_RTX_NAME (c), file);
4536 return;
4539 case 'D':
4540 switch (GET_CODE (x))
4542 case NE:
4543 str = "neq";
4544 break;
4545 case UNORDERED:
4546 str = "unord";
4547 break;
4548 case ORDERED:
4549 str = "ord";
4550 break;
4551 case UNLT:
4552 str = "nge";
4553 break;
4554 case UNLE:
4555 str = "ngt";
4556 break;
4557 case UNGT:
4558 str = "nle";
4559 break;
4560 case UNGE:
4561 str = "nlt";
4562 break;
4563 default:
4564 str = GET_RTX_NAME (GET_CODE (x));
4565 break;
4567 fputs (str, file);
4568 return;
4570 case 'E':
4571 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4572 return;
4574 case 'e':
4575 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4576 return;
4578 case 'F':
4579 if (x == CONST0_RTX (GET_MODE (x)))
4580 str = reg_names [FR_REG (0)];
4581 else if (x == CONST1_RTX (GET_MODE (x)))
4582 str = reg_names [FR_REG (1)];
4583 else
4585 gcc_assert (GET_CODE (x) == REG);
4586 str = reg_names [REGNO (x)];
4588 fputs (str, file);
4589 return;
4591 case 'I':
4592 fputs (reg_names [REGNO (x) + 1], file);
4593 return;
4595 case 'J':
4596 case 'j':
4598 unsigned int regno = REGNO (XEXP (x, 0));
4599 if (GET_CODE (x) == EQ)
4600 regno += 1;
4601 if (code == 'j')
4602 regno ^= 1;
4603 fputs (reg_names [regno], file);
4605 return;
4607 case 'O':
4608 if (MEM_VOLATILE_P (x))
4609 fputs(".acq", file);
4610 return;
4612 case 'P':
4614 HOST_WIDE_INT value;
4616 switch (GET_CODE (XEXP (x, 0)))
4618 default:
4619 return;
4621 case POST_MODIFY:
4622 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4623 if (GET_CODE (x) == CONST_INT)
4624 value = INTVAL (x);
4625 else
4627 gcc_assert (GET_CODE (x) == REG);
4628 fprintf (file, ", %s", reg_names[REGNO (x)]);
4629 return;
4631 break;
4633 case POST_INC:
4634 value = GET_MODE_SIZE (GET_MODE (x));
4635 break;
4637 case POST_DEC:
4638 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4639 break;
4642 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4643 return;
4646 case 'Q':
4647 if (MEM_VOLATILE_P (x))
4648 fputs(".rel", file);
4649 return;
4651 case 'S':
4652 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4653 return;
4655 case 'T':
4656 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4658 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4659 return;
4661 break;
4663 case 'U':
4664 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4666 const char *prefix = "0x";
4667 if (INTVAL (x) & 0x80000000)
4669 fprintf (file, "0xffffffff");
4670 prefix = "";
4672 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4673 return;
4675 break;
4677 case 'X':
4679 unsigned int regno = REGNO (x);
4680 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4682 return;
4684 case 'r':
4685 /* If this operand is the constant zero, write it as register zero.
4686 Any register, zero, or CONST_INT value is OK here. */
4687 if (GET_CODE (x) == REG)
4688 fputs (reg_names[REGNO (x)], file);
4689 else if (x == CONST0_RTX (GET_MODE (x)))
4690 fputs ("r0", file);
4691 else if (GET_CODE (x) == CONST_INT)
4692 output_addr_const (file, x);
4693 else
4694 output_operand_lossage ("invalid %%r value");
4695 return;
4697 case 'v':
4698 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4699 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4700 break;
4702 case '+':
4704 const char *which;
4706 /* For conditional branches, returns or calls, substitute
4707 sptk, dptk, dpnt, or spnt for %s. */
4708 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4709 if (x)
4711 int pred_val = INTVAL (XEXP (x, 0));
4713 /* Guess the bottom 2% and top 2% statically predicted. */
4714 if (pred_val < REG_BR_PROB_BASE / 50
4715 && br_prob_note_reliable_p (x))
4716 which = ".spnt";
4717 else if (pred_val < REG_BR_PROB_BASE / 2)
4718 which = ".dpnt";
4719 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4720 || !br_prob_note_reliable_p (x))
4721 which = ".dptk";
4722 else
4723 which = ".sptk";
4725 else if (GET_CODE (current_output_insn) == CALL_INSN)
4726 which = ".sptk";
4727 else
4728 which = ".dptk";
4730 fputs (which, file);
4731 return;
4734 case ',':
4735 x = current_insn_predicate;
4736 if (x)
4738 unsigned int regno = REGNO (XEXP (x, 0));
4739 if (GET_CODE (x) == EQ)
4740 regno += 1;
4741 fprintf (file, "(%s) ", reg_names [regno]);
4743 return;
4745 default:
4746 output_operand_lossage ("ia64_print_operand: unknown code");
4747 return;
4750 switch (GET_CODE (x))
4752 /* This happens for the spill/restore instructions. */
4753 case POST_INC:
4754 case POST_DEC:
4755 case POST_MODIFY:
4756 x = XEXP (x, 0);
4757 /* ... fall through ... */
4759 case REG:
4760 fputs (reg_names [REGNO (x)], file);
4761 break;
4763 case MEM:
4765 rtx addr = XEXP (x, 0);
4766 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4767 addr = XEXP (addr, 0);
4768 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4769 break;
4772 default:
4773 output_addr_const (file, x);
4774 break;
4777 return;
4780 /* Compute a (partial) cost for rtx X. Return true if the complete
4781 cost has been computed, and false if subexpressions should be
4782 scanned. In either case, *TOTAL contains the cost result. */
4783 /* ??? This is incomplete. */
4785 static bool
4786 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4788 switch (code)
4790 case CONST_INT:
4791 switch (outer_code)
4793 case SET:
4794 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4795 return true;
4796 case PLUS:
4797 if (CONST_OK_FOR_I (INTVAL (x)))
4798 *total = 0;
4799 else if (CONST_OK_FOR_J (INTVAL (x)))
4800 *total = 1;
4801 else
4802 *total = COSTS_N_INSNS (1);
4803 return true;
4804 default:
4805 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4806 *total = 0;
4807 else
4808 *total = COSTS_N_INSNS (1);
4809 return true;
4812 case CONST_DOUBLE:
4813 *total = COSTS_N_INSNS (1);
4814 return true;
4816 case CONST:
4817 case SYMBOL_REF:
4818 case LABEL_REF:
4819 *total = COSTS_N_INSNS (3);
4820 return true;
4822 case MULT:
4823 /* For multiplies wider than HImode, we have to go to the FPU,
4824 which normally involves copies. Plus there's the latency
4825 of the multiply itself, and the latency of the instructions to
4826 transfer integer regs to FP regs. */
4827 /* ??? Check for FP mode. */
4828 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4829 *total = COSTS_N_INSNS (10);
4830 else
4831 *total = COSTS_N_INSNS (2);
4832 return true;
4834 case PLUS:
4835 case MINUS:
4836 case ASHIFT:
4837 case ASHIFTRT:
4838 case LSHIFTRT:
4839 *total = COSTS_N_INSNS (1);
4840 return true;
4842 case DIV:
4843 case UDIV:
4844 case MOD:
4845 case UMOD:
4846 /* We make divide expensive, so that divide-by-constant will be
4847 optimized to a multiply. */
4848 *total = COSTS_N_INSNS (60);
4849 return true;
4851 default:
4852 return false;
4856 /* Calculate the cost of moving data from a register in class FROM to
4857 one in class TO, using MODE. */
4860 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4861 enum reg_class to)
4863 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4864 if (to == ADDL_REGS)
4865 to = GR_REGS;
4866 if (from == ADDL_REGS)
4867 from = GR_REGS;
4869 /* All costs are symmetric, so reduce cases by putting the
4870 lower number class as the destination. */
4871 if (from < to)
4873 enum reg_class tmp = to;
4874 to = from, from = tmp;
4877 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4878 so that we get secondary memory reloads. Between FR_REGS,
4879 we have to make this at least as expensive as MEMORY_MOVE_COST
4880 to avoid spectacularly poor register class preferencing. */
4881 if (mode == XFmode || mode == RFmode)
4883 if (to != GR_REGS || from != GR_REGS)
4884 return MEMORY_MOVE_COST (mode, to, 0);
4885 else
4886 return 3;
4889 switch (to)
4891 case PR_REGS:
4892 /* Moving between PR registers takes two insns. */
4893 if (from == PR_REGS)
4894 return 3;
4895 /* Moving between PR and anything but GR is impossible. */
4896 if (from != GR_REGS)
4897 return MEMORY_MOVE_COST (mode, to, 0);
4898 break;
4900 case BR_REGS:
4901 /* Moving between BR and anything but GR is impossible. */
4902 if (from != GR_REGS && from != GR_AND_BR_REGS)
4903 return MEMORY_MOVE_COST (mode, to, 0);
4904 break;
4906 case AR_I_REGS:
4907 case AR_M_REGS:
4908 /* Moving between AR and anything but GR is impossible. */
4909 if (from != GR_REGS)
4910 return MEMORY_MOVE_COST (mode, to, 0);
4911 break;
4913 case GR_REGS:
4914 case FR_REGS:
4915 case FP_REGS:
4916 case GR_AND_FR_REGS:
4917 case GR_AND_BR_REGS:
4918 case ALL_REGS:
4919 break;
4921 default:
4922 gcc_unreachable ();
4925 return 2;
4928 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4929 to use when copying X into that class. */
4931 enum reg_class
4932 ia64_preferred_reload_class (rtx x, enum reg_class class)
4934 switch (class)
4936 case FR_REGS:
4937 case FP_REGS:
4938 /* Don't allow volatile mem reloads into floating point registers.
4939 This is defined to force reload to choose the r/m case instead
4940 of the f/f case when reloading (set (reg fX) (mem/v)). */
4941 if (MEM_P (x) && MEM_VOLATILE_P (x))
4942 return NO_REGS;
4944 /* Force all unrecognized constants into the constant pool. */
4945 if (CONSTANT_P (x))
4946 return NO_REGS;
4947 break;
4949 case AR_M_REGS:
4950 case AR_I_REGS:
4951 if (!OBJECT_P (x))
4952 return NO_REGS;
4953 break;
4955 default:
4956 break;
4959 return class;
4962 /* This function returns the register class required for a secondary
4963 register when copying between one of the registers in CLASS, and X,
4964 using MODE. A return value of NO_REGS means that no secondary register
4965 is required. */
4967 enum reg_class
4968 ia64_secondary_reload_class (enum reg_class class,
4969 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4971 int regno = -1;
4973 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4974 regno = true_regnum (x);
4976 switch (class)
4978 case BR_REGS:
4979 case AR_M_REGS:
4980 case AR_I_REGS:
4981 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4982 interaction. We end up with two pseudos with overlapping lifetimes
4983 both of which are equiv to the same constant, and both which need
4984 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4985 changes depending on the path length, which means the qty_first_reg
4986 check in make_regs_eqv can give different answers at different times.
4987 At some point I'll probably need a reload_indi pattern to handle
4988 this.
4990 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4991 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4992 non-general registers for good measure. */
4993 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4994 return GR_REGS;
4996 /* This is needed if a pseudo used as a call_operand gets spilled to a
4997 stack slot. */
4998 if (GET_CODE (x) == MEM)
4999 return GR_REGS;
5000 break;
5002 case FR_REGS:
5003 case FP_REGS:
5004 /* Need to go through general registers to get to other class regs. */
5005 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5006 return GR_REGS;
5008 /* This can happen when a paradoxical subreg is an operand to the
5009 muldi3 pattern. */
5010 /* ??? This shouldn't be necessary after instruction scheduling is
5011 enabled, because paradoxical subregs are not accepted by
5012 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5013 stop the paradoxical subreg stupidity in the *_operand functions
5014 in recog.c. */
5015 if (GET_CODE (x) == MEM
5016 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5017 || GET_MODE (x) == QImode))
5018 return GR_REGS;
5020 /* This can happen because of the ior/and/etc patterns that accept FP
5021 registers as operands. If the third operand is a constant, then it
5022 needs to be reloaded into a FP register. */
5023 if (GET_CODE (x) == CONST_INT)
5024 return GR_REGS;
5026 /* This can happen because of register elimination in a muldi3 insn.
5027 E.g. `26107 * (unsigned long)&u'. */
5028 if (GET_CODE (x) == PLUS)
5029 return GR_REGS;
5030 break;
5032 case PR_REGS:
5033 /* ??? This happens if we cse/gcse a BImode value across a call,
5034 and the function has a nonlocal goto. This is because global
5035 does not allocate call crossing pseudos to hard registers when
5036 current_function_has_nonlocal_goto is true. This is relatively
5037 common for C++ programs that use exceptions. To reproduce,
5038 return NO_REGS and compile libstdc++. */
5039 if (GET_CODE (x) == MEM)
5040 return GR_REGS;
5042 /* This can happen when we take a BImode subreg of a DImode value,
5043 and that DImode value winds up in some non-GR register. */
5044 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5045 return GR_REGS;
5046 break;
5048 default:
5049 break;
5052 return NO_REGS;
5056 /* Parse the -mfixed-range= option string. */
5058 static void
5059 fix_range (const char *const_str)
5061 int i, first, last;
5062 char *str, *dash, *comma;
5064 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5065 REG2 are either register names or register numbers. The effect
5066 of this option is to mark the registers in the range from REG1 to
5067 REG2 as ``fixed'' so they won't be used by the compiler. This is
5068 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5070 i = strlen (const_str);
5071 str = (char *) alloca (i + 1);
5072 memcpy (str, const_str, i + 1);
5074 while (1)
5076 dash = strchr (str, '-');
5077 if (!dash)
5079 warning (0, "value of -mfixed-range must have form REG1-REG2");
5080 return;
5082 *dash = '\0';
5084 comma = strchr (dash + 1, ',');
5085 if (comma)
5086 *comma = '\0';
5088 first = decode_reg_name (str);
5089 if (first < 0)
5091 warning (0, "unknown register name: %s", str);
5092 return;
5095 last = decode_reg_name (dash + 1);
5096 if (last < 0)
5098 warning (0, "unknown register name: %s", dash + 1);
5099 return;
5102 *dash = '-';
5104 if (first > last)
5106 warning (0, "%s-%s is an empty range", str, dash + 1);
5107 return;
5110 for (i = first; i <= last; ++i)
5111 fixed_regs[i] = call_used_regs[i] = 1;
5113 if (!comma)
5114 break;
5116 *comma = ',';
5117 str = comma + 1;
5121 /* Implement TARGET_HANDLE_OPTION. */
5123 static bool
5124 ia64_handle_option (size_t code, const char *arg, int value)
5126 switch (code)
5128 case OPT_mfixed_range_:
5129 fix_range (arg);
5130 return true;
5132 case OPT_mtls_size_:
5133 if (value != 14 && value != 22 && value != 64)
5134 error ("bad value %<%s%> for -mtls-size= switch", arg);
5135 return true;
5137 case OPT_mtune_:
5139 static struct pta
5141 const char *name; /* processor name or nickname. */
5142 enum processor_type processor;
5144 const processor_alias_table[] =
5146 {"itanium", PROCESSOR_ITANIUM},
5147 {"itanium1", PROCESSOR_ITANIUM},
5148 {"merced", PROCESSOR_ITANIUM},
5149 {"itanium2", PROCESSOR_ITANIUM2},
5150 {"mckinley", PROCESSOR_ITANIUM2},
5152 int const pta_size = ARRAY_SIZE (processor_alias_table);
5153 int i;
5155 for (i = 0; i < pta_size; i++)
5156 if (!strcmp (arg, processor_alias_table[i].name))
5158 ia64_tune = processor_alias_table[i].processor;
5159 break;
5161 if (i == pta_size)
5162 error ("bad value %<%s%> for -mtune= switch", arg);
5163 return true;
5166 default:
5167 return true;
5171 /* Implement OVERRIDE_OPTIONS. */
5173 void
5174 ia64_override_options (void)
5176 if (TARGET_AUTO_PIC)
5177 target_flags |= MASK_CONST_GP;
5179 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5181 warning (0, "not yet implemented: latency-optimized inline square root");
5182 TARGET_INLINE_SQRT = INL_MAX_THR;
5185 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5186 flag_schedule_insns_after_reload = 0;
5188 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5190 init_machine_status = ia64_init_machine_status;
5193 static struct machine_function *
5194 ia64_init_machine_status (void)
5196 return ggc_alloc_cleared (sizeof (struct machine_function));
5199 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5200 static enum attr_type ia64_safe_type (rtx);
5202 static enum attr_itanium_class
5203 ia64_safe_itanium_class (rtx insn)
5205 if (recog_memoized (insn) >= 0)
5206 return get_attr_itanium_class (insn);
5207 else
5208 return ITANIUM_CLASS_UNKNOWN;
5211 static enum attr_type
5212 ia64_safe_type (rtx insn)
5214 if (recog_memoized (insn) >= 0)
5215 return get_attr_type (insn);
5216 else
5217 return TYPE_UNKNOWN;
5220 /* The following collection of routines emit instruction group stop bits as
5221 necessary to avoid dependencies. */
5223 /* Need to track some additional registers as far as serialization is
5224 concerned so we can properly handle br.call and br.ret. We could
5225 make these registers visible to gcc, but since these registers are
5226 never explicitly used in gcc generated code, it seems wasteful to
5227 do so (plus it would make the call and return patterns needlessly
5228 complex). */
5229 #define REG_RP (BR_REG (0))
5230 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5231 /* This is used for volatile asms which may require a stop bit immediately
5232 before and after them. */
5233 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5234 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5235 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5237 /* For each register, we keep track of how it has been written in the
5238 current instruction group.
5240 If a register is written unconditionally (no qualifying predicate),
5241 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5243 If a register is written if its qualifying predicate P is true, we
5244 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5245 may be written again by the complement of P (P^1) and when this happens,
5246 WRITE_COUNT gets set to 2.
5248 The result of this is that whenever an insn attempts to write a register
5249 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5251 If a predicate register is written by a floating-point insn, we set
5252 WRITTEN_BY_FP to true.
5254 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5255 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
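/* Example: within one group, "(p6) mov r8 = 1" followed by
   "(p7) mov r8 = 2" needs no stop bit (p6/p7 form a complementary pair,
   so WRITE_COUNT reaches 2), but any further write of r8 in the same
   group would then require one.  */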
5257 struct reg_write_state
5259 unsigned int write_count : 2;
5260 unsigned int first_pred : 16;
5261 unsigned int written_by_fp : 1;
5262 unsigned int written_by_and : 1;
5263 unsigned int written_by_or : 1;
5266 /* Cumulative info for the current instruction group. */
5267 struct reg_write_state rws_sum[NUM_REGS];
5268 /* Info for the current instruction. This gets copied to rws_sum after a
5269 stop bit is emitted. */
5270 struct reg_write_state rws_insn[NUM_REGS];
5272 /* Indicates whether this is the first instruction after a stop bit,
5273 in which case we don't need another stop bit. Without this,
5274 ia64_variable_issue will die when scheduling an alloc. */
5275 static int first_instruction;
5277 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5278 RTL for one instruction. */
5279 struct reg_flags
5281 unsigned int is_write : 1; /* Is register being written? */
5282 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5283 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5284 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5285 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5286 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5289 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
5290 static int rws_access_regno (int, struct reg_flags, int);
5291 static int rws_access_reg (rtx, struct reg_flags, int);
5292 static void update_set_flags (rtx, struct reg_flags *);
5293 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5294 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5295 static void init_insn_group_barriers (void);
5296 static int group_barrier_needed (rtx);
5297 static int safe_group_barrier_needed (rtx);
5299 /* Update *RWS for REGNO, which is being written by the current instruction,
5300 with predicate PRED, and associated register flags in FLAGS. */
5302 static void
5303 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
5305 if (pred)
5306 rws[regno].write_count++;
5307 else
5308 rws[regno].write_count = 2;
5309 rws[regno].written_by_fp |= flags.is_fp;
5310 /* ??? Not tracking and/or across differing predicates. */
5311 rws[regno].written_by_and = flags.is_and;
5312 rws[regno].written_by_or = flags.is_or;
5313 rws[regno].first_pred = pred;
5316 /* Handle an access to register REGNO of type FLAGS using predicate register
5317 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5318 a dependency with an earlier instruction in the same group. */
5320 static int
5321 rws_access_regno (int regno, struct reg_flags flags, int pred)
5323 int need_barrier = 0;
5325 gcc_assert (regno < NUM_REGS);
5327 if (! PR_REGNO_P (regno))
5328 flags.is_and = flags.is_or = 0;
5330 if (flags.is_write)
5332 int write_count;
5334 /* One insn writes same reg multiple times? */
5335 gcc_assert (!rws_insn[regno].write_count);
5337 /* Update info for current instruction. */
5338 rws_update (rws_insn, regno, flags, pred);
5339 write_count = rws_sum[regno].write_count;
5341 switch (write_count)
5343 case 0:
5344 /* The register has not been written yet. */
5345 rws_update (rws_sum, regno, flags, pred);
5346 break;
5348 case 1:
5349 /* The register has been written via a predicate. If this is
5350 not a complementary predicate, then we need a barrier. */
5351 /* ??? This assumes that P and P+1 are always complementary
5352 predicates for P even. */
5353 if (flags.is_and && rws_sum[regno].written_by_and)
5355 else if (flags.is_or && rws_sum[regno].written_by_or)
5357 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5358 need_barrier = 1;
5359 rws_update (rws_sum, regno, flags, pred);
5360 break;
5362 case 2:
5363 /* The register has been unconditionally written already. We
5364 need a barrier. */
5365 if (flags.is_and && rws_sum[regno].written_by_and)
5367 else if (flags.is_or && rws_sum[regno].written_by_or)
5369 else
5370 need_barrier = 1;
5371 rws_sum[regno].written_by_and = flags.is_and;
5372 rws_sum[regno].written_by_or = flags.is_or;
5373 break;
5375 default:
5376 gcc_unreachable ();
5379 else
5381 if (flags.is_branch)
5383 /* Branches have several RAW exceptions that allow us to avoid
5384 barriers. */
5386 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5387 /* RAW dependencies on branch regs are permissible as long
5388 as the writer is a non-branch instruction. Since we
5389 never generate code that uses a branch register written
5390 by a branch instruction, handling this case is
5391 easy. */
5392 return 0;
5394 if (REGNO_REG_CLASS (regno) == PR_REGS
5395 && ! rws_sum[regno].written_by_fp)
5396 /* The predicates of a branch are available within the
5397 same insn group as long as the predicate was written by
5398 something other than a floating-point instruction. */
5399 return 0;
5402 if (flags.is_and && rws_sum[regno].written_by_and)
5403 return 0;
5404 if (flags.is_or && rws_sum[regno].written_by_or)
5405 return 0;
5407 switch (rws_sum[regno].write_count)
5409 case 0:
5410 /* The register has not been written yet. */
5411 break;
5413 case 1:
5414 /* The register has been written via a predicate. If this is
5415 not a complementary predicate, then we need a barrier. */
5416 /* ??? This assumes that P and P+1 are always complementary
5417 predicates for P even. */
5418 if ((rws_sum[regno].first_pred ^ 1) != pred)
5419 need_barrier = 1;
5420 break;
5422 case 2:
5423 /* The register has been unconditionally written already. We
5424 need a barrier. */
5425 need_barrier = 1;
5426 break;
5428 default:
5429 gcc_unreachable ();
5433 return need_barrier;
5436 static int
5437 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5439 int regno = REGNO (reg);
5440 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5442 if (n == 1)
5443 return rws_access_regno (regno, flags, pred);
5444 else
5446 int need_barrier = 0;
5447 while (--n >= 0)
5448 need_barrier |= rws_access_regno (regno + n, flags, pred);
5449 return need_barrier;
5453 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
5454 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
5456 static void
5457 update_set_flags (rtx x, struct reg_flags *pflags)
5459 rtx src = SET_SRC (x);
5461 switch (GET_CODE (src))
5463 case CALL:
5464 return;
5466 case IF_THEN_ELSE:
5467 /* There are four cases here:
5468 (1) The destination is (pc), in which case this is a branch,
5469 nothing here applies.
5470 (2) The destination is ar.lc, in which case this is a
5471 doloop_end_internal,
5472 (3) The destination is an fp register, in which case this is
5473 an fselect instruction.
5474 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5475 this is a check load.
5476 In all cases, nothing we do in this function applies. */
5477 return;
5479 default:
5480 if (COMPARISON_P (src)
5481 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5482 /* Set pflags->is_fp to 1 so that we know we're dealing
5483 with a floating point comparison when processing the
5484 destination of the SET. */
5485 pflags->is_fp = 1;
5487 /* Discover if this is a parallel comparison. We only handle
5488 and.orcm and or.andcm at present, since we must retain a
5489 strict inverse on the predicate pair. */
5490 else if (GET_CODE (src) == AND)
5491 pflags->is_and = 1;
5492 else if (GET_CODE (src) == IOR)
5493 pflags->is_or = 1;
5495 break;
5499 /* Subroutine of rtx_needs_barrier; this function determines whether the
5500 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5501 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5502 for this insn. */
5504 static int
5505 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5507 int need_barrier = 0;
5508 rtx dst;
5509 rtx src = SET_SRC (x);
5511 if (GET_CODE (src) == CALL)
5512 /* We don't need to worry about the result registers that
5513 get written by subroutine call. */
5514 return rtx_needs_barrier (src, flags, pred);
5515 else if (SET_DEST (x) == pc_rtx)
5517 /* X is a conditional branch. */
5518 /* ??? This seems redundant, as the caller sets this bit for
5519 all JUMP_INSNs. */
5520 if (!ia64_spec_check_src_p (src))
5521 flags.is_branch = 1;
5522 return rtx_needs_barrier (src, flags, pred);
5525 if (ia64_spec_check_src_p (src))
5526 /* Avoid checking one register twice (in condition
5527 and in 'then' section) for ldc pattern. */
5529 gcc_assert (REG_P (XEXP (src, 2)));
5530 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5532 /* We process MEM below. */
5533 src = XEXP (src, 1);
5536 need_barrier |= rtx_needs_barrier (src, flags, pred);
5538 dst = SET_DEST (x);
5539 if (GET_CODE (dst) == ZERO_EXTRACT)
5541 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5542 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5544 return need_barrier;
5547 /* Handle an access to rtx X of type FLAGS using predicate register
5548 PRED. Return 1 if this access creates a dependency with an earlier
5549 instruction in the same group. */
5551 static int
5552 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5554 int i, j;
5555 int is_complemented = 0;
5556 int need_barrier = 0;
5557 const char *format_ptr;
5558 struct reg_flags new_flags;
5559 rtx cond;
5561 if (! x)
5562 return 0;
5564 new_flags = flags;
5566 switch (GET_CODE (x))
5568 case SET:
5569 update_set_flags (x, &new_flags);
5570 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5571 if (GET_CODE (SET_SRC (x)) != CALL)
5573 new_flags.is_write = 1;
5574 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5576 break;
5578 case CALL:
5579 new_flags.is_write = 0;
5580 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5582 /* Avoid multiple register writes, in case this is a pattern with
5583 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5584 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5586 new_flags.is_write = 1;
5587 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5588 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5589 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5591 break;
5593 case COND_EXEC:
5594 /* X is a predicated instruction. */
5596 cond = COND_EXEC_TEST (x);
5597 gcc_assert (!pred);
5598 need_barrier = rtx_needs_barrier (cond, flags, 0);
5600 if (GET_CODE (cond) == EQ)
5601 is_complemented = 1;
5602 cond = XEXP (cond, 0);
5603 gcc_assert (GET_CODE (cond) == REG
5604 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5605 pred = REGNO (cond);
5606 if (is_complemented)
5607 ++pred;
5609 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5610 return need_barrier;
5612 case CLOBBER:
5613 case USE:
5614 /* Clobber & use are for earlier compiler-phases only. */
5615 break;
5617 case ASM_OPERANDS:
5618 case ASM_INPUT:
5619 /* We always emit stop bits for traditional asms. We emit stop bits
5620 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5621 if (GET_CODE (x) != ASM_OPERANDS
5622 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5624 /* Avoid writing the register multiple times if we have multiple
5625 asm outputs. This avoids a failure in rws_access_reg. */
5626 if (! rws_insn[REG_VOLATILE].write_count)
5628 new_flags.is_write = 1;
5629 rws_access_regno (REG_VOLATILE, new_flags, pred);
5631 return 1;
5634 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5635 We cannot just fall through here since then we would be confused
5636 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
5637 traditional asms unlike their normal usage. */
5639 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5640 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5641 need_barrier = 1;
5642 break;
5644 case PARALLEL:
5645 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5647 rtx pat = XVECEXP (x, 0, i);
5648 switch (GET_CODE (pat))
5650 case SET:
5651 update_set_flags (pat, &new_flags);
5652 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5653 break;
5655 case USE:
5656 case CALL:
5657 case ASM_OPERANDS:
5658 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5659 break;
5661 case CLOBBER:
5662 case RETURN:
5663 break;
5665 default:
5666 gcc_unreachable ();
5669 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5671 rtx pat = XVECEXP (x, 0, i);
5672 if (GET_CODE (pat) == SET)
5674 if (GET_CODE (SET_SRC (pat)) != CALL)
5676 new_flags.is_write = 1;
5677 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5678 pred);
5681 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5682 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5684 break;
5686 case SUBREG:
5687 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5688 break;
5689 case REG:
5690 if (REGNO (x) == AR_UNAT_REGNUM)
5692 for (i = 0; i < 64; ++i)
5693 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5695 else
5696 need_barrier = rws_access_reg (x, flags, pred);
5697 break;
5699 case MEM:
5700 /* Find the regs used in memory address computation. */
5701 new_flags.is_write = 0;
5702 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5703 break;
5705 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5706 case SYMBOL_REF: case LABEL_REF: case CONST:
5707 break;
5709 /* Operators with side-effects. */
5710 case POST_INC: case POST_DEC:
5711 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5713 new_flags.is_write = 0;
5714 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5715 new_flags.is_write = 1;
5716 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5717 break;
5719 case POST_MODIFY:
5720 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5722 new_flags.is_write = 0;
5723 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5724 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5725 new_flags.is_write = 1;
5726 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5727 break;
5729 /* Handle common unary and binary ops for efficiency. */
5730 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5731 case MOD: case UDIV: case UMOD: case AND: case IOR:
5732 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5733 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5734 case NE: case EQ: case GE: case GT: case LE:
5735 case LT: case GEU: case GTU: case LEU: case LTU:
5736 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5737 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5738 break;
5740 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5741 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5742 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5743 case SQRT: case FFS: case POPCOUNT:
5744 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5745 break;
5747 case VEC_SELECT:
5748 /* VEC_SELECT's second argument is a PARALLEL with integers that
5749 describe the elements selected. On ia64, those integers are
5750 always constants. Avoid walking the PARALLEL so that we don't
5751 get confused with "normal" parallels and then die. */
5752 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5753 break;
5755 case UNSPEC:
5756 switch (XINT (x, 1))
5758 case UNSPEC_LTOFF_DTPMOD:
5759 case UNSPEC_LTOFF_DTPREL:
5760 case UNSPEC_DTPREL:
5761 case UNSPEC_LTOFF_TPREL:
5762 case UNSPEC_TPREL:
5763 case UNSPEC_PRED_REL_MUTEX:
5764 case UNSPEC_PIC_CALL:
5765 case UNSPEC_MF:
5766 case UNSPEC_FETCHADD_ACQ:
5767 case UNSPEC_BSP_VALUE:
5768 case UNSPEC_FLUSHRS:
5769 case UNSPEC_BUNDLE_SELECTOR:
5770 break;
5772 case UNSPEC_GR_SPILL:
5773 case UNSPEC_GR_RESTORE:
5775 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5776 HOST_WIDE_INT bit = (offset >> 3) & 63;
5778 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5779 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5780 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5781 new_flags, pred);
5782 break;
5785 case UNSPEC_FR_SPILL:
5786 case UNSPEC_FR_RESTORE:
5787 case UNSPEC_GETF_EXP:
5788 case UNSPEC_SETF_EXP:
5789 case UNSPEC_ADDP4:
5790 case UNSPEC_FR_SQRT_RECIP_APPROX:
5791 case UNSPEC_LDA:
5792 case UNSPEC_LDS:
5793 case UNSPEC_LDSA:
5794 case UNSPEC_CHKACLR:
5795 case UNSPEC_CHKS:
5796 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5797 break;
5799 case UNSPEC_FR_RECIP_APPROX:
5800 case UNSPEC_SHRP:
5801 case UNSPEC_COPYSIGN:
5802 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5803 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5804 break;
5806 case UNSPEC_CMPXCHG_ACQ:
5807 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5808 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5809 break;
5811 default:
5812 gcc_unreachable ();
5814 break;
5816 case UNSPEC_VOLATILE:
5817 switch (XINT (x, 1))
5819 case UNSPECV_ALLOC:
5820 /* Alloc must always be the first instruction of a group.
5821 We force this by always returning true. */
5822 /* ??? We might get better scheduling if we explicitly check for
5823 input/local/output register dependencies, and modify the
5824 scheduler so that alloc is always reordered to the start of
5825 the current group. We could then eliminate all of the
5826 first_instruction code. */
5827 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5829 new_flags.is_write = 1;
5830 rws_access_regno (REG_AR_CFM, new_flags, pred);
5831 return 1;
5833 case UNSPECV_SET_BSP:
5834 need_barrier = 1;
5835 break;
5837 case UNSPECV_BLOCKAGE:
5838 case UNSPECV_INSN_GROUP_BARRIER:
5839 case UNSPECV_BREAK:
5840 case UNSPECV_PSAC_ALL:
5841 case UNSPECV_PSAC_NORMAL:
5842 return 0;
5844 default:
5845 gcc_unreachable ();
5847 break;
5849 case RETURN:
5850 new_flags.is_write = 0;
5851 need_barrier = rws_access_regno (REG_RP, flags, pred);
5852 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5854 new_flags.is_write = 1;
5855 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5856 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5857 break;
5859 default:
5860 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5861 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5862 switch (format_ptr[i])
5864 case '0': /* unused field */
5865 case 'i': /* integer */
5866 case 'n': /* note */
5867 case 'w': /* wide integer */
5868 case 's': /* pointer to string */
5869 case 'S': /* optional pointer to string */
5870 break;
5872 case 'e':
5873 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5874 need_barrier = 1;
5875 break;
5877 case 'E':
5878 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5879 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5880 need_barrier = 1;
5881 break;
5883 default:
5884 gcc_unreachable ();
5886 break;
5888 return need_barrier;
5891 /* Clear out the state for group_barrier_needed at the start of a
5892 sequence of insns. */
5894 static void
5895 init_insn_group_barriers (void)
5897 memset (rws_sum, 0, sizeof (rws_sum));
5898 first_instruction = 1;
5901 /* Given the current state, determine whether a group barrier (a stop bit) is
5902 necessary before INSN. Return nonzero if so. This modifies the state to
5903 include the effects of INSN as a side-effect. */
5905 static int
5906 group_barrier_needed (rtx insn)
5908 rtx pat;
5909 int need_barrier = 0;
5910 struct reg_flags flags;
5912 memset (&flags, 0, sizeof (flags));
5913 switch (GET_CODE (insn))
5915 case NOTE:
5916 break;
5918 case BARRIER:
5919 /* A barrier doesn't imply an instruction group boundary. */
5920 break;
5922 case CODE_LABEL:
5923 memset (rws_insn, 0, sizeof (rws_insn));
5924 return 1;
5926 case CALL_INSN:
5927 flags.is_branch = 1;
5928 flags.is_sibcall = SIBLING_CALL_P (insn);
5929 memset (rws_insn, 0, sizeof (rws_insn));
5931 /* Don't bundle a call following another call. */
5932 if ((pat = prev_active_insn (insn))
5933 && GET_CODE (pat) == CALL_INSN)
5935 need_barrier = 1;
5936 break;
5939 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5940 break;
5942 case JUMP_INSN:
5943 if (!ia64_spec_check_p (insn))
5944 flags.is_branch = 1;
5946 /* Don't bundle a jump following a call. */
5947 if ((pat = prev_active_insn (insn))
5948 && GET_CODE (pat) == CALL_INSN)
5950 need_barrier = 1;
5951 break;
5953 /* FALLTHRU */
5955 case INSN:
5956 if (GET_CODE (PATTERN (insn)) == USE
5957 || GET_CODE (PATTERN (insn)) == CLOBBER)
5958 /* Don't care about USE and CLOBBER "insns"---those are used to
5959 indicate to the optimizer that it shouldn't get rid of
5960 certain operations. */
5961 break;
5963 pat = PATTERN (insn);
5965 /* Ug. Hack hacks hacked elsewhere. */
5966 switch (recog_memoized (insn))
5968 /* We play dependency tricks with the epilogue in order
5969 to get proper schedules. Undo this for dv analysis. */
5970 case CODE_FOR_epilogue_deallocate_stack:
5971 case CODE_FOR_prologue_allocate_stack:
5972 pat = XVECEXP (pat, 0, 0);
5973 break;
5975 /* The pattern we use for br.cloop confuses the code above.
5976 The second element of the vector is representative. */
5977 case CODE_FOR_doloop_end_internal:
5978 pat = XVECEXP (pat, 0, 1);
5979 break;
5981 /* Doesn't generate code. */
5982 case CODE_FOR_pred_rel_mutex:
5983 case CODE_FOR_prologue_use:
5984 return 0;
5986 default:
5987 break;
5990 memset (rws_insn, 0, sizeof (rws_insn));
5991 need_barrier = rtx_needs_barrier (pat, flags, 0);
5993 /* Check to see if the previous instruction was a volatile
5994 asm. */
5995 if (! need_barrier)
5996 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5997 break;
5999 default:
6000 gcc_unreachable ();
6003 if (first_instruction && INSN_P (insn)
6004 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6005 && GET_CODE (PATTERN (insn)) != USE
6006 && GET_CODE (PATTERN (insn)) != CLOBBER)
6008 need_barrier = 0;
6009 first_instruction = 0;
6012 return need_barrier;
6015 /* Like group_barrier_needed, but do not clobber the current state. */
6017 static int
6018 safe_group_barrier_needed (rtx insn)
6020 struct reg_write_state rws_saved[NUM_REGS];
6021 int saved_first_instruction;
6022 int t;
6024 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
6025 saved_first_instruction = first_instruction;
6027 t = group_barrier_needed (insn);
6029 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
6030 first_instruction = saved_first_instruction;
6032 return t;
6035 /* Scan the current function and insert stop bits as necessary to
6036 eliminate dependencies. This function assumes that a final
6037 instruction scheduling pass has been run which has already
6038 inserted most of the necessary stop bits. This function only
6039 inserts new ones at basic block boundaries, since these are
6040 invisible to the scheduler. */
6042 static void
6043 emit_insn_group_barriers (FILE *dump)
6045 rtx insn;
6046 rtx last_label = 0;
6047 int insns_since_last_label = 0;
6049 init_insn_group_barriers ();
6051 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6053 if (GET_CODE (insn) == CODE_LABEL)
6055 if (insns_since_last_label)
6056 last_label = insn;
6057 insns_since_last_label = 0;
6059 else if (GET_CODE (insn) == NOTE
6060 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
6062 if (insns_since_last_label)
6063 last_label = insn;
6064 insns_since_last_label = 0;
6066 else if (GET_CODE (insn) == INSN
6067 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6068 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6070 init_insn_group_barriers ();
6071 last_label = 0;
6073 else if (INSN_P (insn))
6075 insns_since_last_label = 1;
6077 if (group_barrier_needed (insn))
6079 if (last_label)
6081 if (dump)
6082 fprintf (dump, "Emitting stop before label %d\n",
6083 INSN_UID (last_label));
6084 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6085 insn = last_label;
6087 init_insn_group_barriers ();
6088 last_label = 0;
6095 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6096 This function has to emit all necessary group barriers. */
6098 static void
6099 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6101 rtx insn;
6103 init_insn_group_barriers ();
6105 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6107 if (GET_CODE (insn) == BARRIER)
6109 rtx last = prev_active_insn (insn);
6111 if (! last)
6112 continue;
6113 if (GET_CODE (last) == JUMP_INSN
6114 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6115 last = prev_active_insn (last);
6116 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6117 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6119 init_insn_group_barriers ();
6121 else if (INSN_P (insn))
6123 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6124 init_insn_group_barriers ();
6125 else if (group_barrier_needed (insn))
6127 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6128 init_insn_group_barriers ();
6129 group_barrier_needed (insn);
6137 /* Instruction scheduling support. */
6139 #define NR_BUNDLES 10
6141 /* A list of names of all available bundles. */
6143 static const char *bundle_name [NR_BUNDLES] =
6145 ".mii",
6146 ".mmi",
6147 ".mfi",
6148 ".mmf",
6149 #if NR_BUNDLES == 10
6150 ".bbb",
6151 ".mbb",
6152 #endif
6153 ".mib",
6154 ".mmb",
6155 ".mfb",
6156 ".mlx"
6159 /* Nonzero if we should insert stop bits into the schedule. */
6161 int ia64_final_schedule = 0;
6163 /* Codes of the corresponding queried units: */
6165 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6166 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6168 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6169 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6171 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6173 /* The following variable value is an insn group barrier. */
6175 static rtx dfa_stop_insn;
6177 /* The following variable value is the last issued insn. */
6179 static rtx last_scheduled_insn;
6181 /* The following variable value is size of the DFA state. */
6183 static size_t dfa_state_size;
6185 /* The following variable value is pointer to a DFA state used as
6186 temporary variable. */
6188 static state_t temp_dfa_state = NULL;
6190 /* The following variable value is DFA state after issuing the last
6191 insn. */
6193 static state_t prev_cycle_state = NULL;
6195 /* The following array element values are TRUE if the corresponding
6196 insn requires stop bits to be added before it. */
6198 static char *stops_p = NULL;
6200 /* The following array element values are ZERO for non-speculative
6201 instructions and hold the corresponding speculation check number for
6202 speculative instructions. */
6203 static int *spec_check_no = NULL;
6205 /* Size of spec_check_no array. */
6206 static int max_uid = 0;
6208 /* The following variable is used to set up the array mentioned above. */
6210 static int stop_before_p = 0;
6212 /* The following variable value is the length of the arrays `clocks' and
6213 `add_cycles'. */
6215 static int clocks_length;
6217 /* The following array element values are cycles on which the
6218 corresponding insn will be issued. The array is used only for
6219 Itanium1. */
6221 static int *clocks;
6223 /* The following array element values are the numbers of cycles that should
6224 be added to improve insn scheduling for MM_insns on Itanium1. */
6226 static int *add_cycles;
6228 /* The following variable value is number of data speculations in progress. */
6229 static int pending_data_specs = 0;
6231 static rtx ia64_single_set (rtx);
6232 static void ia64_emit_insn_before (rtx, rtx);
6234 /* Map a bundle number to its pseudo-op. */
6236 const char *
6237 get_bundle_name (int b)
6239 return bundle_name[b];
6243 /* Return the maximum number of instructions a cpu can issue. */
6245 static int
6246 ia64_issue_rate (void)
6248 return 6;
6251 /* Helper function - like single_set, but look inside COND_EXEC. */
6253 static rtx
6254 ia64_single_set (rtx insn)
6256 rtx x = PATTERN (insn), ret;
6257 if (GET_CODE (x) == COND_EXEC)
6258 x = COND_EXEC_CODE (x);
6259 if (GET_CODE (x) == SET)
6260 return x;
6262 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6263 Although they are not classical single set, the second set is there just
6264 to protect it from moving past FP-relative stack accesses. */
6265 switch (recog_memoized (insn))
6267 case CODE_FOR_prologue_allocate_stack:
6268 case CODE_FOR_epilogue_deallocate_stack:
6269 ret = XVECEXP (x, 0, 0);
6270 break;
6272 default:
6273 ret = single_set_2 (insn, x);
6274 break;
6277 return ret;
6280 /* Adjust the cost of a scheduling dependency. Return the new cost of
6281 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6283 static int
6284 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6286 enum attr_itanium_class dep_class;
6287 enum attr_itanium_class insn_class;
6289 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6290 return cost;
6292 insn_class = ia64_safe_itanium_class (insn);
6293 dep_class = ia64_safe_itanium_class (dep_insn);
6294 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6295 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6296 return 0;
6298 return cost;
6301 /* Like emit_insn_before, but skip cycle_display notes.
6302 ??? When cycle display notes are implemented, update this. */
6304 static void
6305 ia64_emit_insn_before (rtx insn, rtx before)
6307 emit_insn_before (insn, before);
6310 /* The following function marks insns that produce addresses for load
6311 and store insns. Such insns will be placed into M slots because this
6312 decreases latency on Itanium1 (see the function
6313 `ia64_produce_address_p' and the DFA descriptions). */
6315 static void
6316 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6318 rtx insn, next, next_tail;
6320 /* Before reload, which_alternative is not set, which means that
6321 ia64_safe_itanium_class will produce wrong results for (at least)
6322 move instructions. */
6323 if (!reload_completed)
6324 return;
6326 next_tail = NEXT_INSN (tail);
6327 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6328 if (INSN_P (insn))
6329 insn->call = 0;
6330 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6331 if (INSN_P (insn)
6332 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6334 dep_link_t link;
6336 FOR_EACH_DEP_LINK (link, INSN_FORW_DEPS (insn))
6338 enum attr_itanium_class c;
6340 if (DEP_LINK_KIND (link) != REG_DEP_TRUE)
6341 continue;
6343 next = DEP_LINK_CON (link);
6344 c = ia64_safe_itanium_class (next);
6345 if ((c == ITANIUM_CLASS_ST
6346 || c == ITANIUM_CLASS_STF)
6347 && ia64_st_address_bypass_p (insn, next))
6348 break;
6349 else if ((c == ITANIUM_CLASS_LD
6350 || c == ITANIUM_CLASS_FLD
6351 || c == ITANIUM_CLASS_FLDP)
6352 && ia64_ld_address_bypass_p (insn, next))
6353 break;
6355 insn->call = link != 0;
6359 /* We're beginning a new block. Initialize data structures as necessary. */
6361 static void
6362 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6363 int sched_verbose ATTRIBUTE_UNUSED,
6364 int max_ready ATTRIBUTE_UNUSED)
6366 #ifdef ENABLE_CHECKING
6367 rtx insn;
6369 if (reload_completed)
6370 for (insn = NEXT_INSN (current_sched_info->prev_head);
6371 insn != current_sched_info->next_tail;
6372 insn = NEXT_INSN (insn))
6373 gcc_assert (!SCHED_GROUP_P (insn));
6374 #endif
6375 last_scheduled_insn = NULL_RTX;
6376 init_insn_group_barriers ();
6379 /* We're beginning a scheduling pass. Check assertion. */
6381 static void
6382 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6383 int sched_verbose ATTRIBUTE_UNUSED,
6384 int max_ready ATTRIBUTE_UNUSED)
6386 gcc_assert (!pending_data_specs);
6389 /* Scheduling pass is now finished. Free/reset static variable. */
6390 static void
6391 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6392 int sched_verbose ATTRIBUTE_UNUSED)
6394 free (spec_check_no);
6395 spec_check_no = 0;
6396 max_uid = 0;
6399 /* We are about to begin issuing insns for this clock cycle.
6400 Override the default sort algorithm to better slot instructions. */
6402 static int
6403 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6404 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6405 int reorder_type)
6407 int n_asms;
6408 int n_ready = *pn_ready;
6409 rtx *e_ready = ready + n_ready;
6410 rtx *insnp;
6412 if (sched_verbose)
6413 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6415 if (reorder_type == 0)
6417 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6418 n_asms = 0;
6419 for (insnp = ready; insnp < e_ready; insnp++)
6420 if (insnp < e_ready)
6422 rtx insn = *insnp;
6423 enum attr_type t = ia64_safe_type (insn);
6424 if (t == TYPE_UNKNOWN)
6426 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6427 || asm_noperands (PATTERN (insn)) >= 0)
6429 rtx lowest = ready[n_asms];
6430 ready[n_asms] = insn;
6431 *insnp = lowest;
6432 n_asms++;
6434 else
6436 rtx highest = ready[n_ready - 1];
6437 ready[n_ready - 1] = insn;
6438 *insnp = highest;
6439 return 1;
6444 if (n_asms < n_ready)
6446 /* Some normal insns to process. Skip the asms. */
6447 ready += n_asms;
6448 n_ready -= n_asms;
6450 else if (n_ready > 0)
6451 return 1;
6454 if (ia64_final_schedule)
6456 int deleted = 0;
6457 int nr_need_stop = 0;
6459 for (insnp = ready; insnp < e_ready; insnp++)
6460 if (safe_group_barrier_needed (*insnp))
6461 nr_need_stop++;
6463 if (reorder_type == 1 && n_ready == nr_need_stop)
6464 return 0;
6465 if (reorder_type == 0)
6466 return 1;
6467 insnp = e_ready;
6468 /* Move down everything that needs a stop bit, preserving
6469 relative order. */
6470 while (insnp-- > ready + deleted)
6471 while (insnp >= ready + deleted)
6473 rtx insn = *insnp;
6474 if (! safe_group_barrier_needed (insn))
6475 break;
6476 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6477 *ready = insn;
6478 deleted++;
6480 n_ready -= deleted;
6481 ready += deleted;
6484 return 1;
6487 /* We are about to begin issuing insns for this clock cycle. Override
6488 the default sort algorithm to better slot instructions. */
6490 static int
6491 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6492 int clock_var)
6494 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6495 pn_ready, clock_var, 0);
6498 /* Like ia64_sched_reorder, but called after issuing each insn.
6499 Override the default sort algorithm to better slot instructions. */
6501 static int
6502 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6503 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6504 int *pn_ready, int clock_var)
6506 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6507 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6508 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6509 clock_var, 1);
6512 /* We are about to issue INSN. Return the number of insns left on the
6513 ready queue that can be issued this cycle. */
6515 static int
6516 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6517 int sched_verbose ATTRIBUTE_UNUSED,
6518 rtx insn ATTRIBUTE_UNUSED,
6519 int can_issue_more ATTRIBUTE_UNUSED)
6521 if (current_sched_info->flags & DO_SPECULATION)
6522 /* Modulo scheduling does not extend h_i_d when emitting
6523 new instructions. Deal with it. */
6525 if (DONE_SPEC (insn) & BEGIN_DATA)
6526 pending_data_specs++;
6527 if (CHECK_SPEC (insn) & BEGIN_DATA)
6528 pending_data_specs--;
6531 last_scheduled_insn = insn;
6532 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6533 if (reload_completed)
6535 int needed = group_barrier_needed (insn);
6537 gcc_assert (!needed);
6538 if (GET_CODE (insn) == CALL_INSN)
6539 init_insn_group_barriers ();
6540 stops_p [INSN_UID (insn)] = stop_before_p;
6541 stop_before_p = 0;
6543 return 1;
6546 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6547 can be chosen. */
6549 static int
6550 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6552 gcc_assert (insn && INSN_P (insn));
6553 return ((!reload_completed
6554 || !safe_group_barrier_needed (insn))
6555 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn));
6558 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6559 can be chosen. */
6561 static bool
6562 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (rtx insn)
6564 gcc_assert (insn && INSN_P (insn));
6565 /* The size of the ALAT is 32. Since we perform conservative data
6566 speculation, we keep the ALAT half-empty. */
6567 return (pending_data_specs < 16
6568 || !(TODO_SPEC (insn) & BEGIN_DATA));
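/* In other words, a new BEGIN_DATA speculation is admitted only while fewer
   than 16 of the 32 ALAT entries can be tied up by speculative loads whose
   checks have not been issued yet.  */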
6571 /* The following variable value is a pseudo-insn used by the DFA insn
6572 scheduler to change the DFA state when the simulated clock is
6573 increased. */
6575 static rtx dfa_pre_cycle_insn;
6577 /* We are about to begin issuing INSN. Return nonzero if we cannot
6578 issue it on the given cycle CLOCK, and return zero if we should not sort
6579 the ready queue on the next clock start. */
6581 static int
6582 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6583 int clock, int *sort_p)
6585 int setup_clocks_p = FALSE;
6587 gcc_assert (insn && INSN_P (insn));
6588 if ((reload_completed && safe_group_barrier_needed (insn))
6589 || (last_scheduled_insn
6590 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6591 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6592 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6594 init_insn_group_barriers ();
6595 if (verbose && dump)
6596 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6597 last_clock == clock ? " + cycle advance" : "");
6598 stop_before_p = 1;
6599 if (last_clock == clock)
6601 state_transition (curr_state, dfa_stop_insn);
6602 if (TARGET_EARLY_STOP_BITS)
6603 *sort_p = (last_scheduled_insn == NULL_RTX
6604 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6605 else
6606 *sort_p = 0;
6607 return 1;
6609 else if (reload_completed)
6610 setup_clocks_p = TRUE;
6611 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6612 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6613 state_reset (curr_state);
6614 else
6616 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6617 state_transition (curr_state, dfa_stop_insn);
6618 state_transition (curr_state, dfa_pre_cycle_insn);
6619 state_transition (curr_state, NULL);
6622 else if (reload_completed)
6623 setup_clocks_p = TRUE;
6624 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6625 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6626 && asm_noperands (PATTERN (insn)) < 0)
6628 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6630 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6632 dep_link_t link;
6633 int d = -1;
6635 FOR_EACH_DEP_LINK (link, INSN_BACK_DEPS (insn))
6636 if (DEP_LINK_KIND (link) == REG_DEP_TRUE)
6638 enum attr_itanium_class dep_class;
6639 rtx dep_insn = DEP_LINK_PRO (link);
6641 dep_class = ia64_safe_itanium_class (dep_insn);
6642 if ((dep_class == ITANIUM_CLASS_MMMUL
6643 || dep_class == ITANIUM_CLASS_MMSHF)
6644 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6645 && (d < 0
6646 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6647 d = last_clock - clocks [INSN_UID (dep_insn)];
6649 if (d >= 0)
6650 add_cycles [INSN_UID (insn)] = 3 - d;
6653 return 0;
6656 /* Implement targetm.sched.h_i_d_extended hook.
6657 Extend internal data structures. */
6658 static void
6659 ia64_h_i_d_extended (void)
6661 if (current_sched_info->flags & DO_SPECULATION)
6663 int new_max_uid = get_max_uid () + 1;
6665 spec_check_no = xrecalloc (spec_check_no, new_max_uid,
6666 max_uid, sizeof (*spec_check_no));
6667 max_uid = new_max_uid;
6670 if (stops_p != NULL)
6672 int new_clocks_length = get_max_uid () + 1;
6674 stops_p = xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6676 if (ia64_tune == PROCESSOR_ITANIUM)
6678 clocks = xrecalloc (clocks, new_clocks_length, clocks_length,
6679 sizeof (int));
6680 add_cycles = xrecalloc (add_cycles, new_clocks_length, clocks_length,
6681 sizeof (int));
6684 clocks_length = new_clocks_length;
6688 /* Constants that help map 'enum machine_mode' to int. */
6689 enum SPEC_MODES
6691 SPEC_MODE_INVALID = -1,
6692 SPEC_MODE_FIRST = 0,
6693 SPEC_MODE_FOR_EXTEND_FIRST = 1,
6694 SPEC_MODE_FOR_EXTEND_LAST = 3,
6695 SPEC_MODE_LAST = 8
6698 /* Return the index of MODE. */
6699 static int
6700 ia64_mode_to_int (enum machine_mode mode)
6702 switch (mode)
6704 case BImode: return 0; /* SPEC_MODE_FIRST */
6705 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
6706 case HImode: return 2;
6707 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
6708 case DImode: return 4;
6709 case SFmode: return 5;
6710 case DFmode: return 6;
6711 case XFmode: return 7;
6712 case TImode:
6713 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
6714 mentioned in itanium[12].md. Predicate fp_register_operand also
6715 needs to be defined. Bottom line: better disable for now. */
6716 return SPEC_MODE_INVALID;
6717 default: return SPEC_MODE_INVALID;
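/* For example, a QImode access maps to 1 (SPEC_MODE_FOR_EXTEND_FIRST) and an
   SImode access to 3 (SPEC_MODE_FOR_EXTEND_LAST), so only QI, HI and SI
   loads are accepted under a ZERO_EXTEND by ia64_speculate_insn below;
   DImode and the FP modes map to 4..7 and are handled only as plain
   loads.  */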
6721 /* Provide information about speculation capabilities. */
6722 static void
6723 ia64_set_sched_flags (spec_info_t spec_info)
6725 unsigned int *flags = &(current_sched_info->flags);
6727 if (*flags & SCHED_RGN
6728 || *flags & SCHED_EBB)
6730 int mask = 0;
6732 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
6733 || (mflag_sched_ar_data_spec && reload_completed))
6735 mask |= BEGIN_DATA;
6737 if ((mflag_sched_br_in_data_spec && !reload_completed)
6738 || (mflag_sched_ar_in_data_spec && reload_completed))
6739 mask |= BE_IN_DATA;
6742 if (mflag_sched_control_spec)
6744 mask |= BEGIN_CONTROL;
6746 if (mflag_sched_in_control_spec)
6747 mask |= BE_IN_CONTROL;
6750 gcc_assert (*flags & USE_GLAT);
6752 if (mask)
6754 *flags |= USE_DEPS_LIST | DETACH_LIFE_INFO | DO_SPECULATION;
6756 spec_info->mask = mask;
6757 spec_info->flags = 0;
6759 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
6760 spec_info->flags |= PREFER_NON_DATA_SPEC;
6762 if ((mask & CONTROL_SPEC)
6763 && mflag_sched_prefer_non_control_spec_insns)
6764 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
6766 if (mflag_sched_spec_verbose)
6768 if (sched_verbose >= 1)
6769 spec_info->dump = sched_dump;
6770 else
6771 spec_info->dump = stderr;
6773 else
6774 spec_info->dump = 0;
6776 if (mflag_sched_count_spec_in_critical_path)
6777 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
6782 /* Implement targetm.sched.speculate_insn hook.
6783 Check whether INSN can be TS speculative.
6784 If it cannot, return -1.
6785 If it can, generate the speculative pattern in NEW_PAT and return 1.
6786 If the current pattern of INSN already provides TS speculation, return 0. */
6787 static int
6788 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
6790 rtx pat, reg, mem, mem_reg;
6791 int mode_no, gen_p = 1;
6792 bool extend_p;
6794 gcc_assert (!(ts & ~BEGIN_SPEC) && ts);
6796 pat = PATTERN (insn);
6798 if (GET_CODE (pat) == COND_EXEC)
6799 pat = COND_EXEC_CODE (pat);
6801 /* This should be a SET ... */
6802 if (GET_CODE (pat) != SET)
6803 return -1;
6805 reg = SET_DEST (pat);
6806 /* ... to the general/fp register ... */
6807 if (!REG_P (reg) || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
6808 return -1;
6810 /* ... from the mem ... */
6811 mem = SET_SRC (pat);
6813 /* ... that can, possibly, be a zero_extend ... */
6814 if (GET_CODE (mem) == ZERO_EXTEND)
6816 mem = XEXP (mem, 0);
6817 extend_p = true;
6819 else
6820 extend_p = false;
6822 /* ... or a speculative load. */
6823 if (GET_CODE (mem) == UNSPEC)
6825 int code;
6827 code = XINT (mem, 1);
6828 if (code != UNSPEC_LDA && code != UNSPEC_LDS && code != UNSPEC_LDSA)
6829 return -1;
6831 if ((code == UNSPEC_LDA && !(ts & BEGIN_CONTROL))
6832 || (code == UNSPEC_LDS && !(ts & BEGIN_DATA))
6833 || code == UNSPEC_LDSA)
6834 gen_p = 0;
6836 mem = XVECEXP (mem, 0, 0);
6837 gcc_assert (MEM_P (mem));
6840 /* Source should be a mem ... */
6841 if (!MEM_P (mem))
6842 return -1;
6844 /* ... addressed by a register. */
6845 mem_reg = XEXP (mem, 0);
6846 if (!REG_P (mem_reg))
6847 return -1;
6849 /* We should use MEM's mode since REG's mode in presence of ZERO_EXTEND
6850 will always be DImode. */
6851 mode_no = ia64_mode_to_int (GET_MODE (mem));
6853 if (mode_no == SPEC_MODE_INVALID
6854 || (extend_p
6855 && !(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
6856 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST)))
6857 return -1;
6859 extract_insn_cached (insn);
6860 gcc_assert (reg == recog_data.operand[0] && mem == recog_data.operand[1]);
6862 *new_pat = ia64_gen_spec_insn (insn, ts, mode_no, gen_p != 0, extend_p);
6864 return gen_p;
6867 enum
6869 /* Offset to reach ZERO_EXTEND patterns. */
6870 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1,
6871 /* Number of patterns for each speculation mode. */
6872 SPEC_N = (SPEC_MODE_LAST
6873 + SPEC_MODE_FOR_EXTEND_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 2)
6876 enum SPEC_GEN_LD_MAP
6878 /* Offset to ld.a patterns. */
6879 SPEC_GEN_A = 0 * SPEC_N,
6880 /* Offset to ld.s patterns. */
6881 SPEC_GEN_S = 1 * SPEC_N,
6882 /* Offset to ld.sa patterns. */
6883 SPEC_GEN_SA = 2 * SPEC_N,
6884 /* Offset to ld.sa patterns. For these patterns the corresponding ld.c will
6885 mutate to chk.s. */
6886 SPEC_GEN_SA_FOR_S = 3 * SPEC_N
6889 /* These offsets are used to get (4 * SPEC_N). */
6890 enum SPEC_GEN_CHECK_OFFSET
6892 SPEC_GEN_CHKA_FOR_A_OFFSET = 4 * SPEC_N - SPEC_GEN_A,
6893 SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * SPEC_N - SPEC_GEN_SA
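/* With the values above, SPEC_N = 8 + 3 - 1 + 2 = 12, which matches the
   number of generators per group in gen_load[] and gen_check[] below (nine
   moves plus three zero extends), and SPEC_GEN_EXTEND_OFFSET = 8, so e.g. a
   QImode load under ZERO_EXTEND (mode 1) selects entry 1 + 8 = 9, the
   zero_extendqidi2 pattern.  The CHKA offsets relocate an ld.a or ld.sa
   index into the 4 * SPEC_N = 48 block of chk.a checks: 48 - 0 for the
   SPEC_GEN_A group and 48 - 24 for the SPEC_GEN_SA group.  */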
6896 /* If GEN_P is true, calculate the index of the needed speculation check and
6897 return the speculative pattern for INSN with speculative mode TS, machine
6898 mode MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
6899 If GEN_P is false, just calculate the index of the needed speculation check. */
6900 static rtx
6901 ia64_gen_spec_insn (rtx insn, ds_t ts, int mode_no, bool gen_p, bool extend_p)
6903 rtx pat, new_pat;
6904 int load_no;
6905 int shift = 0;
6907 static rtx (* const gen_load[]) (rtx, rtx) = {
6908 gen_movbi_advanced,
6909 gen_movqi_advanced,
6910 gen_movhi_advanced,
6911 gen_movsi_advanced,
6912 gen_movdi_advanced,
6913 gen_movsf_advanced,
6914 gen_movdf_advanced,
6915 gen_movxf_advanced,
6916 gen_movti_advanced,
6917 gen_zero_extendqidi2_advanced,
6918 gen_zero_extendhidi2_advanced,
6919 gen_zero_extendsidi2_advanced,
6921 gen_movbi_speculative,
6922 gen_movqi_speculative,
6923 gen_movhi_speculative,
6924 gen_movsi_speculative,
6925 gen_movdi_speculative,
6926 gen_movsf_speculative,
6927 gen_movdf_speculative,
6928 gen_movxf_speculative,
6929 gen_movti_speculative,
6930 gen_zero_extendqidi2_speculative,
6931 gen_zero_extendhidi2_speculative,
6932 gen_zero_extendsidi2_speculative,
6934 gen_movbi_speculative_advanced,
6935 gen_movqi_speculative_advanced,
6936 gen_movhi_speculative_advanced,
6937 gen_movsi_speculative_advanced,
6938 gen_movdi_speculative_advanced,
6939 gen_movsf_speculative_advanced,
6940 gen_movdf_speculative_advanced,
6941 gen_movxf_speculative_advanced,
6942 gen_movti_speculative_advanced,
6943 gen_zero_extendqidi2_speculative_advanced,
6944 gen_zero_extendhidi2_speculative_advanced,
6945 gen_zero_extendsidi2_speculative_advanced,
6947 gen_movbi_speculative_advanced,
6948 gen_movqi_speculative_advanced,
6949 gen_movhi_speculative_advanced,
6950 gen_movsi_speculative_advanced,
6951 gen_movdi_speculative_advanced,
6952 gen_movsf_speculative_advanced,
6953 gen_movdf_speculative_advanced,
6954 gen_movxf_speculative_advanced,
6955 gen_movti_speculative_advanced,
6956 gen_zero_extendqidi2_speculative_advanced,
6957 gen_zero_extendhidi2_speculative_advanced,
6958 gen_zero_extendsidi2_speculative_advanced
6961 load_no = extend_p ? mode_no + SPEC_GEN_EXTEND_OFFSET : mode_no;
6963 if (ts & BEGIN_DATA)
6965 /* We don't need recovery because even if this is an ld.sa, the
6966 ALAT entry will be allocated only if the NAT bit is set to zero.
6967 So it is enough to use ld.c here. */
6969 if (ts & BEGIN_CONTROL)
6971 load_no += SPEC_GEN_SA;
6973 if (!mflag_sched_ldc)
6974 shift = SPEC_GEN_CHKA_FOR_SA_OFFSET;
6976 else
6978 load_no += SPEC_GEN_A;
6980 if (!mflag_sched_ldc)
6981 shift = SPEC_GEN_CHKA_FOR_A_OFFSET;
6984 else if (ts & BEGIN_CONTROL)
6986 /* ld.sa can be used instead of ld.s to avoid basic block splitting. */
6987 if (!mflag_control_ldc)
6988 load_no += SPEC_GEN_S;
6989 else
6991 gcc_assert (mflag_sched_ldc);
6992 load_no += SPEC_GEN_SA_FOR_S;
6995 else
6996 gcc_unreachable ();
6998 /* Set the desired check index. We add '1' because a zero element in this
6999 array means that the instruction with such a uid is non-speculative. */
7000 spec_check_no[INSN_UID (insn)] = load_no + shift + 1;
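/* For example, an unextended DImode ld.a (mode 4, SPEC_GEN_A group) gets
   check index 4 + 0 + 1 = 5, which selects gen_movdi_clr (an ld.c) in
   gen_check[] below; when mflag_sched_ldc is clear, the CHKA offset moves
   this to 4 + 48 + 1 = 53, selecting gen_advanced_load_check_clr_di
   (a chk.a).  */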
7002 if (!gen_p)
7003 return 0;
7005 new_pat = gen_load[load_no] (copy_rtx (recog_data.operand[0]),
7006 copy_rtx (recog_data.operand[1]));
7008 pat = PATTERN (insn);
7009 if (GET_CODE (pat) == COND_EXEC)
7010 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx
7011 (COND_EXEC_TEST (pat)), new_pat);
7013 return new_pat;
7016 /* Offset to branchy checks. */
7017 enum { SPEC_GEN_CHECK_MUTATION_OFFSET = 5 * SPEC_N };
7019 /* Return nonzero if INSN needs a branchy recovery check. */
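/* With SPEC_N == 12 this accepts check numbers 12..23 (the
   gen_speculation_check_* entries used after ld.s) and 48..59 (the chk.a
   entries reached through the SPEC_GEN_CHKA_* offsets); both branch to
   recovery code, unlike the plain ld.c checks.  */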
7020 static bool
7021 ia64_needs_block_p (rtx insn)
7023 int check_no;
7025 check_no = spec_check_no[INSN_UID(insn)] - 1;
7026 gcc_assert (0 <= check_no && check_no < SPEC_GEN_CHECK_MUTATION_OFFSET);
7028 return ((SPEC_GEN_S <= check_no && check_no < SPEC_GEN_S + SPEC_N)
7029 || (4 * SPEC_N <= check_no && check_no < 4 * SPEC_N + SPEC_N));
7032 /* Generate (or regenerate, if (MUTATE_P)) recovery check for INSN.
7033 If (LABEL != 0 || MUTATE_P), generate branchy recovery check.
7034 Otherwise, generate a simple check. */
7035 static rtx
7036 ia64_gen_check (rtx insn, rtx label, bool mutate_p)
7038 rtx op1, pat, check_pat;
7040 static rtx (* const gen_check[]) (rtx, rtx) = {
7041 gen_movbi_clr,
7042 gen_movqi_clr,
7043 gen_movhi_clr,
7044 gen_movsi_clr,
7045 gen_movdi_clr,
7046 gen_movsf_clr,
7047 gen_movdf_clr,
7048 gen_movxf_clr,
7049 gen_movti_clr,
7050 gen_zero_extendqidi2_clr,
7051 gen_zero_extendhidi2_clr,
7052 gen_zero_extendsidi2_clr,
7054 gen_speculation_check_bi,
7055 gen_speculation_check_qi,
7056 gen_speculation_check_hi,
7057 gen_speculation_check_si,
7058 gen_speculation_check_di,
7059 gen_speculation_check_sf,
7060 gen_speculation_check_df,
7061 gen_speculation_check_xf,
7062 gen_speculation_check_ti,
7063 gen_speculation_check_di,
7064 gen_speculation_check_di,
7065 gen_speculation_check_di,
7067 gen_movbi_clr,
7068 gen_movqi_clr,
7069 gen_movhi_clr,
7070 gen_movsi_clr,
7071 gen_movdi_clr,
7072 gen_movsf_clr,
7073 gen_movdf_clr,
7074 gen_movxf_clr,
7075 gen_movti_clr,
7076 gen_zero_extendqidi2_clr,
7077 gen_zero_extendhidi2_clr,
7078 gen_zero_extendsidi2_clr,
7080 gen_movbi_clr,
7081 gen_movqi_clr,
7082 gen_movhi_clr,
7083 gen_movsi_clr,
7084 gen_movdi_clr,
7085 gen_movsf_clr,
7086 gen_movdf_clr,
7087 gen_movxf_clr,
7088 gen_movti_clr,
7089 gen_zero_extendqidi2_clr,
7090 gen_zero_extendhidi2_clr,
7091 gen_zero_extendsidi2_clr,
7093 gen_advanced_load_check_clr_bi,
7094 gen_advanced_load_check_clr_qi,
7095 gen_advanced_load_check_clr_hi,
7096 gen_advanced_load_check_clr_si,
7097 gen_advanced_load_check_clr_di,
7098 gen_advanced_load_check_clr_sf,
7099 gen_advanced_load_check_clr_df,
7100 gen_advanced_load_check_clr_xf,
7101 gen_advanced_load_check_clr_ti,
7102 gen_advanced_load_check_clr_di,
7103 gen_advanced_load_check_clr_di,
7104 gen_advanced_load_check_clr_di,
7106 /* The following checks are generated during mutation. */
7107 gen_advanced_load_check_clr_bi,
7108 gen_advanced_load_check_clr_qi,
7109 gen_advanced_load_check_clr_hi,
7110 gen_advanced_load_check_clr_si,
7111 gen_advanced_load_check_clr_di,
7112 gen_advanced_load_check_clr_sf,
7113 gen_advanced_load_check_clr_df,
7114 gen_advanced_load_check_clr_xf,
7115 gen_advanced_load_check_clr_ti,
7116 gen_advanced_load_check_clr_di,
7117 gen_advanced_load_check_clr_di,
7118 gen_advanced_load_check_clr_di,
7120 0,0,0,0,0,0,0,0,0,0,0,0,
7122 gen_advanced_load_check_clr_bi,
7123 gen_advanced_load_check_clr_qi,
7124 gen_advanced_load_check_clr_hi,
7125 gen_advanced_load_check_clr_si,
7126 gen_advanced_load_check_clr_di,
7127 gen_advanced_load_check_clr_sf,
7128 gen_advanced_load_check_clr_df,
7129 gen_advanced_load_check_clr_xf,
7130 gen_advanced_load_check_clr_ti,
7131 gen_advanced_load_check_clr_di,
7132 gen_advanced_load_check_clr_di,
7133 gen_advanced_load_check_clr_di,
7135 gen_speculation_check_bi,
7136 gen_speculation_check_qi,
7137 gen_speculation_check_hi,
7138 gen_speculation_check_si,
7139 gen_speculation_check_di,
7140 gen_speculation_check_sf,
7141 gen_speculation_check_df,
7142 gen_speculation_check_xf,
7143 gen_speculation_check_ti,
7144 gen_speculation_check_di,
7145 gen_speculation_check_di,
7146 gen_speculation_check_di
7149 extract_insn_cached (insn);
7151 if (label)
7153 gcc_assert (mutate_p || ia64_needs_block_p (insn));
7154 op1 = label;
7156 else
7158 gcc_assert (!mutate_p && !ia64_needs_block_p (insn));
7159 op1 = copy_rtx (recog_data.operand[1]);
7162 if (mutate_p)
7163 /* INSN is ld.c.
7164 Find the speculation check number by searching for original
7165 speculative load in the RESOLVED_DEPS list of INSN.
7166 As long as patterns are unique for each instruction, this can be
7167 accomplished by matching ORIG_PAT fields. */
7169 dep_link_t link;
7170 int check_no = 0;
7171 rtx orig_pat = ORIG_PAT (insn);
7173 FOR_EACH_DEP_LINK (link, INSN_RESOLVED_BACK_DEPS (insn))
7175 rtx x = DEP_LINK_PRO (link);
7177 if (ORIG_PAT (x) == orig_pat)
7178 check_no = spec_check_no[INSN_UID (x)];
7180 gcc_assert (check_no);
7182 spec_check_no[INSN_UID (insn)] = (check_no
7183 + SPEC_GEN_CHECK_MUTATION_OFFSET);
7186 check_pat = (gen_check[spec_check_no[INSN_UID (insn)] - 1]
7187 (copy_rtx (recog_data.operand[0]), op1));
7189 pat = PATTERN (insn);
7190 if (GET_CODE (pat) == COND_EXEC)
7191 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7192 check_pat);
7194 return check_pat;
7197 /* Return nonzero if X is a branchy recovery check. */
7198 static int
7199 ia64_spec_check_p (rtx x)
7201 x = PATTERN (x);
7202 if (GET_CODE (x) == COND_EXEC)
7203 x = COND_EXEC_CODE (x);
7204 if (GET_CODE (x) == SET)
7205 return ia64_spec_check_src_p (SET_SRC (x));
7206 return 0;
7209 /* Return nonzero if SRC belongs to a recovery check. */
7210 static int
7211 ia64_spec_check_src_p (rtx src)
7213 if (GET_CODE (src) == IF_THEN_ELSE)
7215 rtx t;
7217 t = XEXP (src, 0);
7218 if (GET_CODE (t) == NE)
7220 t = XEXP (t, 0);
7222 if (GET_CODE (t) == UNSPEC)
7224 int code;
7226 code = XINT (t, 1);
7228 if (code == UNSPEC_CHKACLR
7229 || code == UNSPEC_CHKS
7230 || code == UNSPEC_LDCCLR)
7232 gcc_assert (code != 0);
7233 return code;
7238 return 0;
7242 /* The following page contains abstract data `bundle states' which are
7243 used for bundling insns (inserting nops and template generation). */
7245 /* The following describes the state of insn bundling. */
7247 struct bundle_state
7249 /* Unique bundle state number to identify them in the debugging
7250 output */
7251 int unique_num;
7252 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7253 /* number of nops before and after the insn */
7254 short before_nops_num, after_nops_num;
7255 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
7256 insn) */
7257 int cost; /* cost of the state in cycles */
7258 int accumulated_insns_num; /* number of all previous insns including
7259 nops. L is considered as 2 insns */
7260 int branch_deviation; /* deviation of previous branches from 3rd slots */
7261 struct bundle_state *next; /* next state with the same insn_num */
7262 struct bundle_state *originator; /* originator (previous insn state) */
7263 /* All bundle states are in the following chain. */
7264 struct bundle_state *allocated_states_chain;
7265 /* The DFA State after issuing the insn and the nops. */
7266 state_t dfa_state;
7269 /* The following maps an insn number to the corresponding bundle state. */
7271 static struct bundle_state **index_to_bundle_states;
7273 /* The unique number of next bundle state. */
7275 static int bundle_states_num;
7277 /* All allocated bundle states are in the following chain. */
7279 static struct bundle_state *allocated_bundle_states_chain;
7281 /* All allocated but not used bundle states are in the following
7282 chain. */
7284 static struct bundle_state *free_bundle_state_chain;
7287 /* The following function returns a free bundle state. */
7289 static struct bundle_state *
7290 get_free_bundle_state (void)
7292 struct bundle_state *result;
7294 if (free_bundle_state_chain != NULL)
7296 result = free_bundle_state_chain;
7297 free_bundle_state_chain = result->next;
7299 else
7301 result = xmalloc (sizeof (struct bundle_state));
7302 result->dfa_state = xmalloc (dfa_state_size);
7303 result->allocated_states_chain = allocated_bundle_states_chain;
7304 allocated_bundle_states_chain = result;
7306 result->unique_num = bundle_states_num++;
7307 return result;
7311 /* The following function frees the given bundle state. */
7313 static void
7314 free_bundle_state (struct bundle_state *state)
7316 state->next = free_bundle_state_chain;
7317 free_bundle_state_chain = state;
7320 /* Start work with abstract data `bundle states'. */
7322 static void
7323 initiate_bundle_states (void)
7325 bundle_states_num = 0;
7326 free_bundle_state_chain = NULL;
7327 allocated_bundle_states_chain = NULL;
7330 /* Finish work with abstract data `bundle states'. */
7332 static void
7333 finish_bundle_states (void)
7335 struct bundle_state *curr_state, *next_state;
7337 for (curr_state = allocated_bundle_states_chain;
7338 curr_state != NULL;
7339 curr_state = next_state)
7341 next_state = curr_state->allocated_states_chain;
7342 free (curr_state->dfa_state);
7343 free (curr_state);
7347 /* Hash table of the bundle states. The key is dfa_state and insn_num
7348 of the bundle states. */
7350 static htab_t bundle_state_table;
7352 /* The function returns hash of BUNDLE_STATE. */
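/* Each byte of the DFA state is added in shifted left by
   (i % CHAR_BIT) * 3 + CHAR_BIT bits, i.e. by 8, 11, ..., 29 depending on
   its position, so the same byte value contributes differently at
   different positions; insn_num is mixed in at the end.  */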
7354 static unsigned
7355 bundle_state_hash (const void *bundle_state)
7357 const struct bundle_state *state = (struct bundle_state *) bundle_state;
7358 unsigned result, i;
7360 for (result = i = 0; i < dfa_state_size; i++)
7361 result += (((unsigned char *) state->dfa_state) [i]
7362 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7363 return result + state->insn_num;
7366 /* The function returns nonzero if the bundle state keys are equal. */
7368 static int
7369 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7371 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
7372 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
7374 return (state1->insn_num == state2->insn_num
7375 && memcmp (state1->dfa_state, state2->dfa_state,
7376 dfa_state_size) == 0);
7379 /* The function inserts the BUNDLE_STATE into the hash table. The
7380 function returns nonzero if the bundle has been inserted into the
7381 table. The table contains the best bundle state with the given key. */
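/* "Best" means: smaller cost wins; on equal cost, fewer accumulated insns
   (nops included); on a further tie, smaller branch deviation.  This is the
   comparison made below before displacing an existing entry.  */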
7383 static int
7384 insert_bundle_state (struct bundle_state *bundle_state)
7386 void **entry_ptr;
7388 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7389 if (*entry_ptr == NULL)
7391 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7392 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7393 *entry_ptr = (void *) bundle_state;
7394 return TRUE;
7396 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7397 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7398 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7399 > bundle_state->accumulated_insns_num
7400 || (((struct bundle_state *)
7401 *entry_ptr)->accumulated_insns_num
7402 == bundle_state->accumulated_insns_num
7403 && ((struct bundle_state *)
7404 *entry_ptr)->branch_deviation
7405 > bundle_state->branch_deviation))))
7408 struct bundle_state temp;
7410 temp = *(struct bundle_state *) *entry_ptr;
7411 *(struct bundle_state *) *entry_ptr = *bundle_state;
7412 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7413 *bundle_state = temp;
7415 return FALSE;
7418 /* Start work with the hash table. */
7420 static void
7421 initiate_bundle_state_table (void)
7423 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7424 (htab_del) 0);
7427 /* Finish work with the hash table. */
7429 static void
7430 finish_bundle_state_table (void)
7432 htab_delete (bundle_state_table);
7437 /* The following variable is an insn `nop' used to check bundle states
7438 with different numbers of inserted nops. */
7440 static rtx ia64_nop;
7442 /* The following function tries to issue NOPS_NUM nops for the current
7443 state without advancing the processor cycle. If this fails, the
7444 function returns FALSE and frees the current state. */
7446 static int
7447 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7449 int i;
7451 for (i = 0; i < nops_num; i++)
7452 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7454 free_bundle_state (curr_state);
7455 return FALSE;
7457 return TRUE;
7460 /* The following function tries to issue INSN for the current
7461 state without advancing the processor cycle. If this fails, the
7462 function returns FALSE and frees the current state. */
7464 static int
7465 try_issue_insn (struct bundle_state *curr_state, rtx insn)
7467 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
7469 free_bundle_state (curr_state);
7470 return FALSE;
7472 return TRUE;
7475 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
7476 starting from ORIGINATOR without advancing the processor cycle. If
7477 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
7478 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
7479 If this succeeds, the function creates a new bundle state and
7480 inserts it into the hash table and into `index_to_bundle_states'. */
7482 static void
7483 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
7484 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
7486 struct bundle_state *curr_state;
7488 curr_state = get_free_bundle_state ();
7489 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
7490 curr_state->insn = insn;
7491 curr_state->insn_num = originator->insn_num + 1;
7492 curr_state->cost = originator->cost;
7493 curr_state->originator = originator;
7494 curr_state->before_nops_num = before_nops_num;
7495 curr_state->after_nops_num = 0;
7496 curr_state->accumulated_insns_num
7497 = originator->accumulated_insns_num + before_nops_num;
7498 curr_state->branch_deviation = originator->branch_deviation;
7499 gcc_assert (insn);
7500 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
7502 gcc_assert (GET_MODE (insn) != TImode);
7503 if (!try_issue_nops (curr_state, before_nops_num))
7504 return;
7505 if (!try_issue_insn (curr_state, insn))
7506 return;
7507 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
7508 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
7509 && curr_state->accumulated_insns_num % 3 != 0)
7511 free_bundle_state (curr_state);
7512 return;
7515 else if (GET_MODE (insn) != TImode)
7517 if (!try_issue_nops (curr_state, before_nops_num))
7518 return;
7519 if (!try_issue_insn (curr_state, insn))
7520 return;
7521 curr_state->accumulated_insns_num++;
7522 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
7523 && asm_noperands (PATTERN (insn)) < 0);
7525 if (ia64_safe_type (insn) == TYPE_L)
7526 curr_state->accumulated_insns_num++;
7528 else
7530 /* If this is an insn that must be first in a group, then don't allow
7531 nops to be emitted before it. Currently, alloc is the only such
7532 supported instruction. */
7533 /* ??? The bundling automatons should handle this for us, but they do
7534 not yet have support for the first_insn attribute. */
7535 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
7537 free_bundle_state (curr_state);
7538 return;
7541 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
7542 state_transition (curr_state->dfa_state, NULL);
7543 curr_state->cost++;
7544 if (!try_issue_nops (curr_state, before_nops_num))
7545 return;
7546 if (!try_issue_insn (curr_state, insn))
7547 return;
7548 curr_state->accumulated_insns_num++;
7549 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7550 || asm_noperands (PATTERN (insn)) >= 0)
7552 /* Finish bundle containing asm insn. */
7553 curr_state->after_nops_num
7554 = 3 - curr_state->accumulated_insns_num % 3;
7555 curr_state->accumulated_insns_num
7556 += 3 - curr_state->accumulated_insns_num % 3;
7558 else if (ia64_safe_type (insn) == TYPE_L)
7559 curr_state->accumulated_insns_num++;
7561 if (ia64_safe_type (insn) == TYPE_B)
7562 curr_state->branch_deviation
7563 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
7564 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
7566 if (!only_bundle_end_p && insert_bundle_state (curr_state))
7568 state_t dfa_state;
7569 struct bundle_state *curr_state1;
7570 struct bundle_state *allocated_states_chain;
7572 curr_state1 = get_free_bundle_state ();
7573 dfa_state = curr_state1->dfa_state;
7574 allocated_states_chain = curr_state1->allocated_states_chain;
7575 *curr_state1 = *curr_state;
7576 curr_state1->dfa_state = dfa_state;
7577 curr_state1->allocated_states_chain = allocated_states_chain;
7578 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
7579 dfa_state_size);
7580 curr_state = curr_state1;
7582 if (!try_issue_nops (curr_state,
7583 3 - curr_state->accumulated_insns_num % 3))
7584 return;
7585 curr_state->after_nops_num
7586 = 3 - curr_state->accumulated_insns_num % 3;
7587 curr_state->accumulated_insns_num
7588 += 3 - curr_state->accumulated_insns_num % 3;
7590 if (!insert_bundle_state (curr_state))
7591 free_bundle_state (curr_state);
7592 return;
7595 /* The following function returns the position in the two-bundle window
7596 for the given STATE. */
7598 static int
7599 get_max_pos (state_t state)
7601 if (cpu_unit_reservation_p (state, pos_6))
7602 return 6;
7603 else if (cpu_unit_reservation_p (state, pos_5))
7604 return 5;
7605 else if (cpu_unit_reservation_p (state, pos_4))
7606 return 4;
7607 else if (cpu_unit_reservation_p (state, pos_3))
7608 return 3;
7609 else if (cpu_unit_reservation_p (state, pos_2))
7610 return 2;
7611 else if (cpu_unit_reservation_p (state, pos_1))
7612 return 1;
7613 else
7614 return 0;
7617 /* The function returns the code of a possible template for the given
7618 position and state. It should be called only with position values of
7619 3 or 6. We avoid generating F NOPs by putting templates containing F
7620 insns at the end of the template search, because of an undocumented
7621 anomaly in McKinley-derived cores which can cause stalls if an F-unit
7622 insn (including a NOP) is issued within a six-cycle window after
7623 reading certain application registers (such as ar.bsp). Furthermore,
7624 power considerations also argue against the use of F-unit
7625 instructions unless they're really needed. */
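/* The returned code is an index into bundle_name[] above: 0 = .mii,
   1 = .mmi, 2 = .mfi, 3 = .mmf, 4 = .bbb, 5 = .mbb, 6 = .mib, 7 = .mmb,
   8 = .mfb, 9 = .mlx; the F-containing templates are tested late in the
   chain, as described above.  */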
7627 static int
7628 get_template (state_t state, int pos)
7630 switch (pos)
7632 case 3:
7633 if (cpu_unit_reservation_p (state, _0mmi_))
7634 return 1;
7635 else if (cpu_unit_reservation_p (state, _0mii_))
7636 return 0;
7637 else if (cpu_unit_reservation_p (state, _0mmb_))
7638 return 7;
7639 else if (cpu_unit_reservation_p (state, _0mib_))
7640 return 6;
7641 else if (cpu_unit_reservation_p (state, _0mbb_))
7642 return 5;
7643 else if (cpu_unit_reservation_p (state, _0bbb_))
7644 return 4;
7645 else if (cpu_unit_reservation_p (state, _0mmf_))
7646 return 3;
7647 else if (cpu_unit_reservation_p (state, _0mfi_))
7648 return 2;
7649 else if (cpu_unit_reservation_p (state, _0mfb_))
7650 return 8;
7651 else if (cpu_unit_reservation_p (state, _0mlx_))
7652 return 9;
7653 else
7654 gcc_unreachable ();
7655 case 6:
7656 if (cpu_unit_reservation_p (state, _1mmi_))
7657 return 1;
7658 else if (cpu_unit_reservation_p (state, _1mii_))
7659 return 0;
7660 else if (cpu_unit_reservation_p (state, _1mmb_))
7661 return 7;
7662 else if (cpu_unit_reservation_p (state, _1mib_))
7663 return 6;
7664 else if (cpu_unit_reservation_p (state, _1mbb_))
7665 return 5;
7666 else if (cpu_unit_reservation_p (state, _1bbb_))
7667 return 4;
7668 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
7669 return 3;
7670 else if (cpu_unit_reservation_p (state, _1mfi_))
7671 return 2;
7672 else if (cpu_unit_reservation_p (state, _1mfb_))
7673 return 8;
7674 else if (cpu_unit_reservation_p (state, _1mlx_))
7675 return 9;
7676 else
7677 gcc_unreachable ();
7678 default:
7679 gcc_unreachable ();
7683 /* The following function returns the first insn important for insn
7684 bundling, starting at INSN and before TAIL. */
7686 static rtx
7687 get_next_important_insn (rtx insn, rtx tail)
7689 for (; insn && insn != tail; insn = NEXT_INSN (insn))
7690 if (INSN_P (insn)
7691 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7692 && GET_CODE (PATTERN (insn)) != USE
7693 && GET_CODE (PATTERN (insn)) != CLOBBER)
7694 return insn;
7695 return NULL_RTX;
7698 /* Add a bundle selector TEMPLATE0 before INSN. */
7700 static void
7701 ia64_add_bundle_selector_before (int template0, rtx insn)
7703 rtx b = gen_bundle_selector (GEN_INT (template0));
7705 ia64_emit_insn_before (b, insn);
7706 #if NR_BUNDLES == 10
7707 if ((template0 == 4 || template0 == 5)
7708 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
7710 int i;
7711 rtx note = NULL_RTX;
7713 /* In .mbb and .bbb bundles, check whether a CALL_INSN is in the
7714 first or second slot.  If it is and has a REG_EH_REGION note, copy
7715 the note to the following nops, because br.call sets rp to the
7716 address of the following bundle and therefore an EH region end
7717 must be on a bundle boundary.  */
7718 insn = PREV_INSN (insn);
7719 for (i = 0; i < 3; i++)
7722 insn = next_active_insn (insn);
7723 while (GET_CODE (insn) == INSN
7724 && get_attr_empty (insn) == EMPTY_YES);
7725 if (GET_CODE (insn) == CALL_INSN)
7726 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
7727 else if (note)
7729 int code;
7731 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
7732 || code == CODE_FOR_nop_b);
7733 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
7734 note = NULL_RTX;
7735 else
7736 REG_NOTES (insn)
7737 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
7738 REG_NOTES (insn));
7742 #endif
7745 /* The following function does insn bundling.  Bundling means
7746 inserting templates and nop insns to fit insn groups into permitted
7747 templates.  Instruction scheduling uses an NDFA (non-deterministic
7748 finite automaton) encoding information about the templates and the
7749 inserted nops.  The nondeterminism of the automaton permits following
7750 all possible insn sequences very quickly.
7752 Unfortunately it is not possible to get information about inserting
7753 nop insns and used templates from the automaton states.  The
7754 automaton only says that we can issue an insn, possibly inserting
7755 some nops before it and using some template.  Therefore insn
7756 bundling in this function is implemented using a DFA
7757 (deterministic finite automaton).  We follow all possible insn
7758 sequences by inserting 0-2 nops (that is what the NDFA describes for
7759 insn scheduling) before/after each insn being bundled.  We know the
7760 start of the simulated processor cycle from insn scheduling (an insn
7761 starting a new cycle has TImode).
7763 A simple implementation of insn bundling would create an enormous
7764 number of possible insn sequences satisfying the information about new
7765 cycle ticks taken from insn scheduling.  To make the algorithm
7766 practical we use dynamic programming.  Each decision (about
7767 inserting nops, and implicitly about previous decisions) is described
7768 by the structure bundle_state (see above).  If we generate the same
7769 bundle state (the key is the automaton state after issuing the insns
7770 and nops for it), we reuse the already generated one.  As a consequence
7771 we reject some decisions which cannot improve the solution and
7772 reduce the memory used by the algorithm.
7774 When we reach the end of the EBB (extended basic block), we choose the
7775 best sequence and then, moving back through the EBB, insert templates
7776 for the best alternative.  The templates are obtained by querying the
7777 automaton state for each insn in the chosen bundle states.
7779 So the algorithm makes two (forward and backward) passes through the
7780 EBB.  There is an additional forward pass through the EBB for the
7781 Itanium1 processor.  This pass inserts more nops to make the dependency
7782 between a producer insn and MMMUL/MMSHF at least 4 cycles long.  */
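/* A schematic sketch of the dynamic programming described above (an
   illustration only -- the real work is done by issue_nops_and_insn
   and insert_bundle_state): for each insn I in the EBB and each bundle
   state S reached before I, the forward pass tries issuing I after
   0, 1 or 2 nops,

       index_to_bundle_states[i] --(0|1|2 nops, I)--> index_to_bundle_states[i+1]

   merging states that reach the same DFA state via insert_bundle_state.
   The backward pass then walks the originator chain of the best final
   state, emitting the recorded nops and a bundle selector at each
   bundle boundary.  */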
7784 static void
7785 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7787 struct bundle_state *curr_state, *next_state, *best_state;
7788 rtx insn, next_insn;
7789 int insn_num;
7790 int i, bundle_end_p, only_bundle_end_p, asm_p;
7791 int pos = 0, max_pos, template0, template1;
7792 rtx b;
7793 rtx nop;
7794 enum attr_type type;
7796 insn_num = 0;
7797 /* Count insns in the EBB. */
7798 for (insn = NEXT_INSN (prev_head_insn);
7799 insn && insn != tail;
7800 insn = NEXT_INSN (insn))
7801 if (INSN_P (insn))
7802 insn_num++;
7803 if (insn_num == 0)
7804 return;
7805 bundling_p = 1;
7806 dfa_clean_insn_cache ();
7807 initiate_bundle_state_table ();
7808 index_to_bundle_states = xmalloc ((insn_num + 2)
7809 * sizeof (struct bundle_state *));
7810 /* First (forward) pass -- generation of bundle states. */
7811 curr_state = get_free_bundle_state ();
7812 curr_state->insn = NULL;
7813 curr_state->before_nops_num = 0;
7814 curr_state->after_nops_num = 0;
7815 curr_state->insn_num = 0;
7816 curr_state->cost = 0;
7817 curr_state->accumulated_insns_num = 0;
7818 curr_state->branch_deviation = 0;
7819 curr_state->next = NULL;
7820 curr_state->originator = NULL;
7821 state_reset (curr_state->dfa_state);
7822 index_to_bundle_states [0] = curr_state;
7823 insn_num = 0;
7824 /* Shift the cycle mark if it is put on an insn which could be ignored. */
7825 for (insn = NEXT_INSN (prev_head_insn);
7826 insn != tail;
7827 insn = NEXT_INSN (insn))
7828 if (INSN_P (insn)
7829 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7830 || GET_CODE (PATTERN (insn)) == USE
7831 || GET_CODE (PATTERN (insn)) == CLOBBER)
7832 && GET_MODE (insn) == TImode)
7834 PUT_MODE (insn, VOIDmode);
7835 for (next_insn = NEXT_INSN (insn);
7836 next_insn != tail;
7837 next_insn = NEXT_INSN (next_insn))
7838 if (INSN_P (next_insn)
7839 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7840 && GET_CODE (PATTERN (next_insn)) != USE
7841 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7843 PUT_MODE (next_insn, TImode);
7844 break;
7847 /* Forward pass: generation of bundle states. */
7848 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7849 insn != NULL_RTX;
7850 insn = next_insn)
7852 gcc_assert (INSN_P (insn)
7853 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7854 && GET_CODE (PATTERN (insn)) != USE
7855 && GET_CODE (PATTERN (insn)) != CLOBBER);
7856 type = ia64_safe_type (insn);
7857 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7858 insn_num++;
7859 index_to_bundle_states [insn_num] = NULL;
7860 for (curr_state = index_to_bundle_states [insn_num - 1];
7861 curr_state != NULL;
7862 curr_state = next_state)
7864 pos = curr_state->accumulated_insns_num % 3;
7865 next_state = curr_state->next;
7866 /* We must fill up the current bundle in order to start a
7867 subsequent asm insn in a new bundle.  An asm insn is always
7868 placed in a separate bundle.  */
7869 only_bundle_end_p
7870 = (next_insn != NULL_RTX
7871 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7872 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7873 /* We may fill up the current bundle if it is the cycle end
7874 without a group barrier. */
7875 bundle_end_p
7876 = (only_bundle_end_p || next_insn == NULL_RTX
7877 || (GET_MODE (next_insn) == TImode
7878 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7879 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7880 || type == TYPE_S
7881 /* We need to insert 2 nops for cases like M_MII. To
7882 guarantee issuing all insns on the same cycle for
7883 Itanium 1, we need to issue 2 nops after the first M
7884 insn (MnnMII where n is a nop insn). */
7885 || ((type == TYPE_M || type == TYPE_A)
7886 && ia64_tune == PROCESSOR_ITANIUM
7887 && !bundle_end_p && pos == 1))
7888 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
7889 only_bundle_end_p);
7890 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
7891 only_bundle_end_p);
7892 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
7893 only_bundle_end_p);
7895 gcc_assert (index_to_bundle_states [insn_num]);
7896 for (curr_state = index_to_bundle_states [insn_num];
7897 curr_state != NULL;
7898 curr_state = curr_state->next)
7899 if (verbose >= 2 && dump)
7901 /* This structure is taken from the generated code of the
7902 pipeline hazard recognizer (see file insn-attrtab.c).
7903 Please don't forget to change the structure if a new
7904 automaton is added to the .md file.  */
7905 struct DFA_chip
7907 unsigned short one_automaton_state;
7908 unsigned short oneb_automaton_state;
7909 unsigned short two_automaton_state;
7910 unsigned short twob_automaton_state;
7913 fprintf
7914 (dump,
7915 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7916 curr_state->unique_num,
7917 (curr_state->originator == NULL
7918 ? -1 : curr_state->originator->unique_num),
7919 curr_state->cost,
7920 curr_state->before_nops_num, curr_state->after_nops_num,
7921 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7922 (ia64_tune == PROCESSOR_ITANIUM
7923 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7924 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7925 INSN_UID (insn));
7929 /* We should find a solution because the 2nd insn scheduling has
7930 found one. */
7931 gcc_assert (index_to_bundle_states [insn_num]);
7932 /* Find a state corresponding to the best insn sequence. */
7933 best_state = NULL;
7934 for (curr_state = index_to_bundle_states [insn_num];
7935 curr_state != NULL;
7936 curr_state = curr_state->next)
7937 /* We are only looking at states whose last bundle is completely
7938 filled.  Of those we prefer insn sequences with minimal cost,
7939 then with the fewest inserted nops, and finally with branch insns
7940 placed in the 3rd slots.  */
7941 if (curr_state->accumulated_insns_num % 3 == 0
7942 && (best_state == NULL || best_state->cost > curr_state->cost
7943 || (best_state->cost == curr_state->cost
7944 && (curr_state->accumulated_insns_num
7945 < best_state->accumulated_insns_num
7946 || (curr_state->accumulated_insns_num
7947 == best_state->accumulated_insns_num
7948 && curr_state->branch_deviation
7949 < best_state->branch_deviation)))))
7950 best_state = curr_state;
7951 /* Second (backward) pass: adding nops and templates. */
7952 insn_num = best_state->before_nops_num;
7953 template0 = template1 = -1;
7954 for (curr_state = best_state;
7955 curr_state->originator != NULL;
7956 curr_state = curr_state->originator)
7958 insn = curr_state->insn;
7959 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7960 || asm_noperands (PATTERN (insn)) >= 0);
7961 insn_num++;
7962 if (verbose >= 2 && dump)
7964 struct DFA_chip
7966 unsigned short one_automaton_state;
7967 unsigned short oneb_automaton_state;
7968 unsigned short two_automaton_state;
7969 unsigned short twob_automaton_state;
7972 fprintf
7973 (dump,
7974 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7975 curr_state->unique_num,
7976 (curr_state->originator == NULL
7977 ? -1 : curr_state->originator->unique_num),
7978 curr_state->cost,
7979 curr_state->before_nops_num, curr_state->after_nops_num,
7980 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7981 (ia64_tune == PROCESSOR_ITANIUM
7982 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7983 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7984 INSN_UID (insn));
7986 /* Find the position in the current bundle window.  The window can
7987 contain at most two bundles.  A two-bundle window means that
7988 the processor will make two bundle rotations.  */
7989 max_pos = get_max_pos (curr_state->dfa_state);
7990 if (max_pos == 6
7991 /* The following (negative template number) means that the
7992 processor did one bundle rotation. */
7993 || (max_pos == 3 && template0 < 0))
7995 /* We are at the end of the window -- find template(s) for
7996 its bundle(s). */
7997 pos = max_pos;
7998 if (max_pos == 3)
7999 template0 = get_template (curr_state->dfa_state, 3);
8000 else
8002 template1 = get_template (curr_state->dfa_state, 3);
8003 template0 = get_template (curr_state->dfa_state, 6);
8006 if (max_pos > 3 && template1 < 0)
8007 /* It may happen when we have the stop inside a bundle. */
8009 gcc_assert (pos <= 3);
8010 template1 = get_template (curr_state->dfa_state, 3);
8011 pos += 3;
8013 if (!asm_p)
8014 /* Emit nops after the current insn. */
8015 for (i = 0; i < curr_state->after_nops_num; i++)
8017 nop = gen_nop ();
8018 emit_insn_after (nop, insn);
8019 pos--;
8020 gcc_assert (pos >= 0);
8021 if (pos % 3 == 0)
8023 /* We are at the start of a bundle: emit the template
8024 (it should be defined). */
8025 gcc_assert (template0 >= 0);
8026 ia64_add_bundle_selector_before (template0, nop);
8027 /* If we have a two-bundle window, we make one bundle
8028 rotation.  Otherwise template0 will be undefined
8029 (a negative value).  */
8030 template0 = template1;
8031 template1 = -1;
8034 /* Move the position backward in the window.  A group barrier has
8035 no slot.  An asm insn takes a whole bundle.  */
8036 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8037 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8038 && asm_noperands (PATTERN (insn)) < 0)
8039 pos--;
8040 /* Long insn takes 2 slots. */
8041 if (ia64_safe_type (insn) == TYPE_L)
8042 pos--;
8043 gcc_assert (pos >= 0);
8044 if (pos % 3 == 0
8045 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8046 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8047 && asm_noperands (PATTERN (insn)) < 0)
8049 /* The current insn is at the bundle start: emit the
8050 template. */
8051 gcc_assert (template0 >= 0);
8052 ia64_add_bundle_selector_before (template0, insn);
8053 b = PREV_INSN (insn);
8054 insn = b;
8055 /* See comment above in analogous place for emitting nops
8056 after the insn. */
8057 template0 = template1;
8058 template1 = -1;
8060 /* Emit nops before the current insn. */
8061 for (i = 0; i < curr_state->before_nops_num; i++)
8063 nop = gen_nop ();
8064 ia64_emit_insn_before (nop, insn);
8065 nop = PREV_INSN (insn);
8066 insn = nop;
8067 pos--;
8068 gcc_assert (pos >= 0);
8069 if (pos % 3 == 0)
8071 /* See comment above in analogous place for emitting nops
8072 after the insn. */
8073 gcc_assert (template0 >= 0);
8074 ia64_add_bundle_selector_before (template0, insn);
8075 b = PREV_INSN (insn);
8076 insn = b;
8077 template0 = template1;
8078 template1 = -1;
8082 if (ia64_tune == PROCESSOR_ITANIUM)
8083 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8084 Itanium1 has a strange design: if the distance between an insn
8085 and a dependent MM-insn is less than 4 cycles, we get an
8086 additional 6-cycle stall.  So we make the distance equal to 4
8087 cycles if it is less.  */
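/* A schematic example (illustration only; the exact shape depends on
   add_cycles[] and on whether a stop already precedes the insn): for
   an MM-insn in slot 1 of an .mii bundle that needs one extra cycle,

       .mii  ld8  mm-insn  other              // original

   becomes roughly

       .mii  ld8  nop  nop ;;                 // padded original bundle
       .mii  nop  mm-insn  other              // restarted in same slot

   i.e. the original bundle is padded with nops, a stop is emitted,
   and the MM-insn restarts in the same slot of a new bundle with the
   original template.  */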
8088 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8089 insn != NULL_RTX;
8090 insn = next_insn)
8092 gcc_assert (INSN_P (insn)
8093 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8094 && GET_CODE (PATTERN (insn)) != USE
8095 && GET_CODE (PATTERN (insn)) != CLOBBER);
8096 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8097 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8098 /* We found an MM-insn which needs additional cycles. */
8100 rtx last;
8101 int i, j, n;
8102 int pred_stop_p;
8104 /* Now we search for the template of the bundle in
8105 which the MM-insn is placed and for the position of the
8106 insn in the bundle (0, 1, 2).  We also check whether
8107 there is a stop before the insn.  */
8108 last = prev_active_insn (insn);
8109 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8110 if (pred_stop_p)
8111 last = prev_active_insn (last);
8112 n = 0;
8113 for (;; last = prev_active_insn (last))
8114 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8116 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8117 if (template0 == 9)
8118 /* The insn is in an MLX bundle.  Change the template
8119 to MFI because we will add nops before the
8120 insn.  This simplifies the subsequent code a lot.  */
8121 PATTERN (last)
8122 = gen_bundle_selector (const2_rtx); /* -> MFI */
8123 break;
8125 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8126 && (ia64_safe_itanium_class (last)
8127 != ITANIUM_CLASS_IGNORE))
8128 n++;
8129 /* Sanity checks: the stop is not at the
8130 bundle start, there are no more than 3 insns in the bundle,
8131 and the MM-insn is not at the start of a bundle with
8132 template MLX.  */
8133 gcc_assert ((!pred_stop_p || n)
8134 && n <= 2
8135 && (template0 != 9 || !n));
8136 /* Fill up the rest of the original bundle with nops (emitted just before the insn). */
8137 for (j = 3 - n; j > 0; j --)
8138 ia64_emit_insn_before (gen_nop (), insn);
8139 /* This takes into account that we will add N more nops
8140 before the insn later -- see the code below.  */
8141 add_cycles [INSN_UID (insn)]--;
8142 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8143 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8144 insn);
8145 if (pred_stop_p)
8146 add_cycles [INSN_UID (insn)]--;
8147 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8149 /* Insert "MII;" template. */
8150 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8151 insn);
8152 ia64_emit_insn_before (gen_nop (), insn);
8153 ia64_emit_insn_before (gen_nop (), insn);
8154 if (i > 1)
8156 /* To decrease code size, we use "MI;I;"
8157 template. */
8158 ia64_emit_insn_before
8159 (gen_insn_group_barrier (GEN_INT (3)), insn);
8160 i--;
8162 ia64_emit_insn_before (gen_nop (), insn);
8163 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8164 insn);
8166 /* Put the MM-insn in the same slot of a bundle with the
8167 same template as the original one. */
8168 ia64_add_bundle_selector_before (template0, insn);
8169 /* To put the insn in the same slot, add the necessary
8170 number of nops.  */
8171 for (j = n; j > 0; j --)
8172 ia64_emit_insn_before (gen_nop (), insn);
8173 /* Put the stop if the original bundle had it. */
8174 if (pred_stop_p)
8175 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8176 insn);
8179 free (index_to_bundle_states);
8180 finish_bundle_state_table ();
8181 bundling_p = 0;
8182 dfa_clean_insn_cache ();
8185 /* The following function is called at the end of scheduling BB or
8186 EBB. After reload, it inserts stop bits and does insn bundling. */
8188 static void
8189 ia64_sched_finish (FILE *dump, int sched_verbose)
8191 if (sched_verbose)
8192 fprintf (dump, "// Finishing schedule.\n");
8193 if (!reload_completed)
8194 return;
8195 if (reload_completed)
8197 final_emit_insn_group_barriers (dump);
8198 bundling (dump, sched_verbose, current_sched_info->prev_head,
8199 current_sched_info->next_tail);
8200 if (sched_verbose && dump)
8201 fprintf (dump, "// finishing %d-%d\n",
8202 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8203 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8205 return;
8209 /* The following function inserts stop bits in scheduled BB or EBB. */
8211 static void
8212 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8214 rtx insn;
8215 int need_barrier_p = 0;
8216 rtx prev_insn = NULL_RTX;
8218 init_insn_group_barriers ();
8220 for (insn = NEXT_INSN (current_sched_info->prev_head);
8221 insn != current_sched_info->next_tail;
8222 insn = NEXT_INSN (insn))
8224 if (GET_CODE (insn) == BARRIER)
8226 rtx last = prev_active_insn (insn);
8228 if (! last)
8229 continue;
8230 if (GET_CODE (last) == JUMP_INSN
8231 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8232 last = prev_active_insn (last);
8233 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8234 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8236 init_insn_group_barriers ();
8237 need_barrier_p = 0;
8238 prev_insn = NULL_RTX;
8240 else if (INSN_P (insn))
8242 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8244 init_insn_group_barriers ();
8245 need_barrier_p = 0;
8246 prev_insn = NULL_RTX;
8248 else if (need_barrier_p || group_barrier_needed (insn))
8250 if (TARGET_EARLY_STOP_BITS)
8252 rtx last;
8254 for (last = insn;
8255 last != current_sched_info->prev_head;
8256 last = PREV_INSN (last))
8257 if (INSN_P (last) && GET_MODE (last) == TImode
8258 && stops_p [INSN_UID (last)])
8259 break;
8260 if (last == current_sched_info->prev_head)
8261 last = insn;
8262 last = prev_active_insn (last);
8263 if (last
8264 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8265 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8266 last);
8267 init_insn_group_barriers ();
8268 for (last = NEXT_INSN (last);
8269 last != insn;
8270 last = NEXT_INSN (last))
8271 if (INSN_P (last))
8272 group_barrier_needed (last);
8274 else
8276 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8277 insn);
8278 init_insn_group_barriers ();
8280 group_barrier_needed (insn);
8281 prev_insn = NULL_RTX;
8283 else if (recog_memoized (insn) >= 0)
8284 prev_insn = insn;
8285 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8286 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8287 || asm_noperands (PATTERN (insn)) >= 0);
8294 /* The following function returns the lookahead depth used by the
8295 first-cycle multipass DFA insn scheduler. */
8297 static int
8298 ia64_first_cycle_multipass_dfa_lookahead (void)
8300 return (reload_completed ? 6 : 4);
8303 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
8305 static void
8306 ia64_init_dfa_pre_cycle_insn (void)
8308 if (temp_dfa_state == NULL)
8310 dfa_state_size = state_size ();
8311 temp_dfa_state = xmalloc (dfa_state_size);
8312 prev_cycle_state = xmalloc (dfa_state_size);
8314 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8315 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8316 recog_memoized (dfa_pre_cycle_insn);
8317 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8318 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8319 recog_memoized (dfa_stop_insn);
8322 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8323 used by the DFA insn scheduler. */
8325 static rtx
8326 ia64_dfa_pre_cycle_insn (void)
8328 return dfa_pre_cycle_insn;
8331 /* The following function returns TRUE if PRODUCER (of type ilog or
8332 ld) produces an address for CONSUMER (of type st or stf). */
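/* For example (an illustrative sketch in IA-64 assembly, not taken
   from actual compiler output):

       add  r14 = r32, r33        // PRODUCER: computes the address
       st8  [r14] = r35           // CONSUMER: uses r14 as the address

   The producer's destination register is mentioned in the consumer's
   memory address, so the bypass applies.  */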
8335 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8337 rtx dest, reg, mem;
8339 gcc_assert (producer && consumer);
8340 dest = ia64_single_set (producer);
8341 gcc_assert (dest);
8342 reg = SET_DEST (dest);
8343 gcc_assert (reg);
8344 if (GET_CODE (reg) == SUBREG)
8345 reg = SUBREG_REG (reg);
8346 gcc_assert (GET_CODE (reg) == REG);
8348 dest = ia64_single_set (consumer);
8349 gcc_assert (dest);
8350 mem = SET_DEST (dest);
8351 gcc_assert (mem && GET_CODE (mem) == MEM);
8352 return reg_mentioned_p (reg, mem);
8355 /* The following function returns TRUE if PRODUCER (of type ilog or
8356 ld) produces an address for CONSUMER (of type ld or fld). */
8359 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8361 rtx dest, src, reg, mem;
8363 gcc_assert (producer && consumer);
8364 dest = ia64_single_set (producer);
8365 gcc_assert (dest);
8366 reg = SET_DEST (dest);
8367 gcc_assert (reg);
8368 if (GET_CODE (reg) == SUBREG)
8369 reg = SUBREG_REG (reg);
8370 gcc_assert (GET_CODE (reg) == REG);
8372 src = ia64_single_set (consumer);
8373 gcc_assert (src);
8374 mem = SET_SRC (src);
8375 gcc_assert (mem);
8377 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
8378 mem = XVECEXP (mem, 0, 0);
8379 else if (GET_CODE (mem) == IF_THEN_ELSE)
8380 /* ??? Is this bypass necessary for ld.c? */
8382 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
8383 mem = XEXP (mem, 1);
8386 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
8387 mem = XEXP (mem, 0);
8389 if (GET_CODE (mem) == UNSPEC)
8391 int c = XINT (mem, 1);
8393 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDSA);
8394 mem = XVECEXP (mem, 0, 0);
8397 /* Note that LO_SUM is used for GOT loads. */
8398 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
8400 return reg_mentioned_p (reg, mem);
8403 /* The following function returns TRUE if INSN produces an address for a
8404 load/store insn.  We will place such insns into an M slot because that
8405 decreases their latency.  */
8408 ia64_produce_address_p (rtx insn)
8410 return insn->call;
8414 /* Emit pseudo-ops for the assembler to describe predicate relations.
8415 At present this assumes that we only consider predicate pairs to
8416 be mutex, and that the assembler can deduce proper values from
8417 straight-line code. */
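/* Roughly speaking (an illustration; the exact pseudo-op text comes
   from the pred_rel_mutex and safe_across_calls patterns in ia64.md):
   for each even predicate register live at a code label we emit a
   directive telling the assembler that the pair written by a single
   compare (e.g. p6/p7) is mutually exclusive, and around a conditional
   noreturn call we bracket the code with safe_across_calls pseudo-ops
   so the assembler does not complain about predicates it believes the
   call clobbered.  */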
8419 static void
8420 emit_predicate_relation_info (void)
8422 basic_block bb;
8424 FOR_EACH_BB_REVERSE (bb)
8426 int r;
8427 rtx head = BB_HEAD (bb);
8429 /* We only need such notes at code labels. */
8430 if (GET_CODE (head) != CODE_LABEL)
8431 continue;
8432 if (GET_CODE (NEXT_INSN (head)) == NOTE
8433 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
8434 head = NEXT_INSN (head);
8436 /* Skip p0, which may be thought to be live due to (reg:DI p0)
8437 grabbing the entire block of predicate registers. */
8438 for (r = PR_REG (2); r < PR_REG (64); r += 2)
8439 if (REGNO_REG_SET_P (bb->il.rtl->global_live_at_start, r))
8441 rtx p = gen_rtx_REG (BImode, r);
8442 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
8443 if (head == BB_END (bb))
8444 BB_END (bb) = n;
8445 head = n;
8449 /* Look for conditional calls that do not return, and protect predicate
8450 relations around them. Otherwise the assembler will assume the call
8451 returns, and complain about uses of call-clobbered predicates after
8452 the call. */
8453 FOR_EACH_BB_REVERSE (bb)
8455 rtx insn = BB_HEAD (bb);
8457 while (1)
8459 if (GET_CODE (insn) == CALL_INSN
8460 && GET_CODE (PATTERN (insn)) == COND_EXEC
8461 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
8463 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
8464 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
8465 if (BB_HEAD (bb) == insn)
8466 BB_HEAD (bb) = b;
8467 if (BB_END (bb) == insn)
8468 BB_END (bb) = a;
8471 if (insn == BB_END (bb))
8472 break;
8473 insn = NEXT_INSN (insn);
8478 /* Perform machine dependent operations on the rtl chain INSNS. */
8480 static void
8481 ia64_reorg (void)
8483 /* We are freeing block_for_insn in the toplev to keep compatibility
8484 with old MDEP_REORGS that are not CFG based. Recompute it now. */
8485 compute_bb_for_insn ();
8487 /* If optimizing, we'll have split before scheduling. */
8488 if (optimize == 0)
8489 split_all_insns (0);
8491 /* ??? update_life_info_in_dirty_blocks fails to terminate during
8492 non-optimizing bootstrap. */
8493 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
8495 if (optimize && ia64_flag_schedule_insns2)
8497 timevar_push (TV_SCHED2);
8498 ia64_final_schedule = 1;
8500 initiate_bundle_states ();
8501 ia64_nop = make_insn_raw (gen_nop ());
8502 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
8503 recog_memoized (ia64_nop);
8504 clocks_length = get_max_uid () + 1;
8505 stops_p = xcalloc (1, clocks_length);
8506 if (ia64_tune == PROCESSOR_ITANIUM)
8508 clocks = xcalloc (clocks_length, sizeof (int));
8509 add_cycles = xcalloc (clocks_length, sizeof (int));
8511 if (ia64_tune == PROCESSOR_ITANIUM2)
8513 pos_1 = get_cpu_unit_code ("2_1");
8514 pos_2 = get_cpu_unit_code ("2_2");
8515 pos_3 = get_cpu_unit_code ("2_3");
8516 pos_4 = get_cpu_unit_code ("2_4");
8517 pos_5 = get_cpu_unit_code ("2_5");
8518 pos_6 = get_cpu_unit_code ("2_6");
8519 _0mii_ = get_cpu_unit_code ("2b_0mii.");
8520 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
8521 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
8522 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
8523 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
8524 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
8525 _0mib_ = get_cpu_unit_code ("2b_0mib.");
8526 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
8527 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
8528 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
8529 _1mii_ = get_cpu_unit_code ("2b_1mii.");
8530 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
8531 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
8532 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
8533 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
8534 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
8535 _1mib_ = get_cpu_unit_code ("2b_1mib.");
8536 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
8537 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
8538 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
8540 else
8542 pos_1 = get_cpu_unit_code ("1_1");
8543 pos_2 = get_cpu_unit_code ("1_2");
8544 pos_3 = get_cpu_unit_code ("1_3");
8545 pos_4 = get_cpu_unit_code ("1_4");
8546 pos_5 = get_cpu_unit_code ("1_5");
8547 pos_6 = get_cpu_unit_code ("1_6");
8548 _0mii_ = get_cpu_unit_code ("1b_0mii.");
8549 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
8550 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
8551 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
8552 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
8553 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
8554 _0mib_ = get_cpu_unit_code ("1b_0mib.");
8555 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
8556 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
8557 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
8558 _1mii_ = get_cpu_unit_code ("1b_1mii.");
8559 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
8560 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
8561 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
8562 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
8563 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
8564 _1mib_ = get_cpu_unit_code ("1b_1mib.");
8565 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
8566 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
8567 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
8569 schedule_ebbs ();
8570 finish_bundle_states ();
8571 if (ia64_tune == PROCESSOR_ITANIUM)
8573 free (add_cycles);
8574 free (clocks);
8576 free (stops_p);
8577 stops_p = NULL;
8578 emit_insn_group_barriers (dump_file);
8580 ia64_final_schedule = 0;
8581 timevar_pop (TV_SCHED2);
8583 else
8584 emit_all_insn_group_barriers (dump_file);
8586 /* A call must not be the last instruction in a function, so that the
8587 return address is still within the function, so that unwinding works
8588 properly. Note that IA-64 differs from dwarf2 on this point. */
8589 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8591 rtx insn;
8592 int saw_stop = 0;
8594 insn = get_last_insn ();
8595 if (! INSN_P (insn))
8596 insn = prev_active_insn (insn);
8597 /* Skip over insns that expand to nothing. */
8598 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
8600 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
8601 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
8602 saw_stop = 1;
8603 insn = prev_active_insn (insn);
8605 if (GET_CODE (insn) == CALL_INSN)
8607 if (! saw_stop)
8608 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8609 emit_insn (gen_break_f ());
8610 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8614 emit_predicate_relation_info ();
8616 if (ia64_flag_var_tracking)
8618 timevar_push (TV_VAR_TRACKING);
8619 variable_tracking_main ();
8620 timevar_pop (TV_VAR_TRACKING);
8624 /* Return true if REGNO is used by the epilogue. */
8627 ia64_epilogue_uses (int regno)
8629 switch (regno)
8631 case R_GR (1):
8632 /* With a call to a function in another module, we will write a new
8633 value to "gp". After returning from such a call, we need to make
8634 sure the function restores the original gp-value, even if the
8635 function itself does not use the gp anymore. */
8636 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
8638 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
8639 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
8640 /* For functions defined with the syscall_linkage attribute, all
8641 input registers are marked as live at all function exits. This
8642 prevents the register allocator from using the input registers,
8643 which in turn makes it possible to restart a system call after
8644 an interrupt without having to save/restore the input registers.
8645 This also prevents kernel data from leaking to application code. */
8646 return lookup_attribute ("syscall_linkage",
8647 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
8649 case R_BR (0):
8650 /* Conditional return patterns can't represent the use of `b0' as
8651 the return address, so we force the value live this way. */
8652 return 1;
8654 case AR_PFS_REGNUM:
8655 /* Likewise for ar.pfs, which is used by br.ret. */
8656 return 1;
8658 default:
8659 return 0;
8663 /* Return true if REGNO is used by the frame unwinder. */
8666 ia64_eh_uses (int regno)
8668 if (! reload_completed)
8669 return 0;
8671 if (current_frame_info.reg_save_b0
8672 && regno == current_frame_info.reg_save_b0)
8673 return 1;
8674 if (current_frame_info.reg_save_pr
8675 && regno == current_frame_info.reg_save_pr)
8676 return 1;
8677 if (current_frame_info.reg_save_ar_pfs
8678 && regno == current_frame_info.reg_save_ar_pfs)
8679 return 1;
8680 if (current_frame_info.reg_save_ar_unat
8681 && regno == current_frame_info.reg_save_ar_unat)
8682 return 1;
8683 if (current_frame_info.reg_save_ar_lc
8684 && regno == current_frame_info.reg_save_ar_lc)
8685 return 1;
8687 return 0;
8690 /* Return true if this goes in small data/bss. */
8692 /* ??? We could also support our own long data here, generating movl/add/ld8
8693 instead of addl,ld8/ld8.  This makes the code bigger, but should make the
8694 code faster because there is one less load.  This would also cover incomplete
8695 types which can't go in sdata/sbss.  */
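#if 0
/* Illustration only (never compiled): assuming the default small-data
   threshold, the first definition is small enough to be placed in
   .sbss while the second is not; string constants and functions are
   never treated as small data (see the checks below).  The names are
   hypothetical.  */
static int ia64_example_small_counter;
static char ia64_example_big_buffer[65536];
#endif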
8697 static bool
8698 ia64_in_small_data_p (tree exp)
8700 if (TARGET_NO_SDATA)
8701 return false;
8703 /* We want to merge strings, so we never consider them small data. */
8704 if (TREE_CODE (exp) == STRING_CST)
8705 return false;
8707 /* Functions are never small data. */
8708 if (TREE_CODE (exp) == FUNCTION_DECL)
8709 return false;
8711 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
8713 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
8715 if (strcmp (section, ".sdata") == 0
8716 || strncmp (section, ".sdata.", 7) == 0
8717 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
8718 || strcmp (section, ".sbss") == 0
8719 || strncmp (section, ".sbss.", 6) == 0
8720 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
8721 return true;
8723 else
8725 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
8727 /* If this is an incomplete type with size 0, then we can't put it
8728 in sdata because it might be too big when completed. */
8729 if (size > 0 && size <= ia64_section_threshold)
8730 return true;
8733 return false;
8736 /* Output assembly directives for prologue regions. */
8738 /* The current basic block number. */
8740 static bool last_block;
8742 /* True if we need a copy_state command at the start of the next block. */
8744 static bool need_copy_state;
8746 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8747 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8748 #endif
8750 /* Emit a debugging label after a call-frame-related insn. We'd
8751 rather output the label right away, but we'd have to output it
8752 after, not before, the instruction, and the instruction has not
8753 been output yet. So we emit the label after the insn, delete it to
8754 avoid introducing basic blocks, and mark it as preserved, such that
8755 it is still output, given that it is referenced in debug info. */
8757 static const char *
8758 ia64_emit_deleted_label_after_insn (rtx insn)
8760 char label[MAX_ARTIFICIAL_LABEL_BYTES];
8761 rtx lb = gen_label_rtx ();
8762 rtx label_insn = emit_label_after (lb, insn);
8764 LABEL_PRESERVE_P (lb) = 1;
8766 delete_insn (label_insn);
8768 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
8770 return xstrdup (label);
8773 /* Define the CFA after INSN with the steady-state definition. */
8775 static void
8776 ia64_dwarf2out_def_steady_cfa (rtx insn)
8778 rtx fp = frame_pointer_needed
8779 ? hard_frame_pointer_rtx
8780 : stack_pointer_rtx;
8782 dwarf2out_def_cfa
8783 (ia64_emit_deleted_label_after_insn (insn),
8784 REGNO (fp),
8785 ia64_initial_elimination_offset
8786 (REGNO (arg_pointer_rtx), REGNO (fp))
8787 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8790 /* The generic dwarf2 frame debug info generator does not define a
8791 separate region for the very end of the epilogue, so refrain from
8792 doing so in the IA64-specific code as well. */
8794 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8796 /* The function emits unwind directives for the start of an epilogue. */
8798 static void
8799 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8801 /* If this isn't the last block of the function, then we need to label the
8802 current state, and copy it back in at the start of the next block. */
8804 if (!last_block)
8806 if (unwind)
8807 fprintf (asm_out_file, "\t.label_state %d\n",
8808 ++cfun->machine->state_num);
8809 need_copy_state = true;
8812 if (unwind)
8813 fprintf (asm_out_file, "\t.restore sp\n");
8814 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8815 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8816 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
8819 /* This function processes a SET pattern looking for specific patterns
8820 which result in emitting an assembly directive required for unwinding. */
8822 static int
8823 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8825 rtx src = SET_SRC (pat);
8826 rtx dest = SET_DEST (pat);
8827 int src_regno, dest_regno;
8829 /* Look for the ALLOC insn. */
8830 if (GET_CODE (src) == UNSPEC_VOLATILE
8831 && XINT (src, 1) == UNSPECV_ALLOC
8832 && GET_CODE (dest) == REG)
8834 dest_regno = REGNO (dest);
8836 /* If this is the final destination for ar.pfs, then this must
8837 be the alloc in the prologue. */
8838 if (dest_regno == current_frame_info.reg_save_ar_pfs)
8840 if (unwind)
8841 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8842 ia64_dbx_register_number (dest_regno));
8844 else
8846 /* This must be an alloc before a sibcall. We must drop the
8847 old frame info. The easiest way to drop the old frame
8848 info is to ensure we had a ".restore sp" directive
8849 followed by a new prologue. If the procedure doesn't
8850 have a memory-stack frame, we'll issue a dummy ".restore
8851 sp" now. */
8852 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8853 /* If we haven't done process_epilogue () yet, do it now. */
8854 process_epilogue (asm_out_file, insn, unwind, frame);
8855 if (unwind)
8856 fprintf (asm_out_file, "\t.prologue\n");
8858 return 1;
8861 /* Look for SP = .... */
8862 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8864 if (GET_CODE (src) == PLUS)
8866 rtx op0 = XEXP (src, 0);
8867 rtx op1 = XEXP (src, 1);
8869 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8871 if (INTVAL (op1) < 0)
8873 gcc_assert (!frame_pointer_needed);
8874 if (unwind)
8875 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8876 -INTVAL (op1));
8877 if (frame)
8878 ia64_dwarf2out_def_steady_cfa (insn);
8880 else
8881 process_epilogue (asm_out_file, insn, unwind, frame);
8883 else
8885 gcc_assert (GET_CODE (src) == REG
8886 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8887 process_epilogue (asm_out_file, insn, unwind, frame);
8890 return 1;
8893 /* Register move we need to look at. */
8894 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
8896 src_regno = REGNO (src);
8897 dest_regno = REGNO (dest);
8899 switch (src_regno)
8901 case BR_REG (0):
8902 /* Saving return address pointer. */
8903 gcc_assert (dest_regno == current_frame_info.reg_save_b0);
8904 if (unwind)
8905 fprintf (asm_out_file, "\t.save rp, r%d\n",
8906 ia64_dbx_register_number (dest_regno));
8907 return 1;
8909 case PR_REG (0):
8910 gcc_assert (dest_regno == current_frame_info.reg_save_pr);
8911 if (unwind)
8912 fprintf (asm_out_file, "\t.save pr, r%d\n",
8913 ia64_dbx_register_number (dest_regno));
8914 return 1;
8916 case AR_UNAT_REGNUM:
8917 gcc_assert (dest_regno == current_frame_info.reg_save_ar_unat);
8918 if (unwind)
8919 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
8920 ia64_dbx_register_number (dest_regno));
8921 return 1;
8923 case AR_LC_REGNUM:
8924 gcc_assert (dest_regno == current_frame_info.reg_save_ar_lc);
8925 if (unwind)
8926 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
8927 ia64_dbx_register_number (dest_regno));
8928 return 1;
8930 case STACK_POINTER_REGNUM:
8931 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
8932 && frame_pointer_needed);
8933 if (unwind)
8934 fprintf (asm_out_file, "\t.vframe r%d\n",
8935 ia64_dbx_register_number (dest_regno));
8936 if (frame)
8937 ia64_dwarf2out_def_steady_cfa (insn);
8938 return 1;
8940 default:
8941 /* Everything else should indicate being stored to memory. */
8942 gcc_unreachable ();
8946 /* Memory store we need to look at. */
8947 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
8949 long off;
8950 rtx base;
8951 const char *saveop;
8953 if (GET_CODE (XEXP (dest, 0)) == REG)
8955 base = XEXP (dest, 0);
8956 off = 0;
8958 else
8960 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
8961 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
8962 base = XEXP (XEXP (dest, 0), 0);
8963 off = INTVAL (XEXP (XEXP (dest, 0), 1));
8966 if (base == hard_frame_pointer_rtx)
8968 saveop = ".savepsp";
8969 off = - off;
8971 else
8973 gcc_assert (base == stack_pointer_rtx);
8974 saveop = ".savesp";
8977 src_regno = REGNO (src);
8978 switch (src_regno)
8980 case BR_REG (0):
8981 gcc_assert (!current_frame_info.reg_save_b0);
8982 if (unwind)
8983 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
8984 return 1;
8986 case PR_REG (0):
8987 gcc_assert (!current_frame_info.reg_save_pr);
8988 if (unwind)
8989 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
8990 return 1;
8992 case AR_LC_REGNUM:
8993 gcc_assert (!current_frame_info.reg_save_ar_lc);
8994 if (unwind)
8995 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
8996 return 1;
8998 case AR_PFS_REGNUM:
8999 gcc_assert (!current_frame_info.reg_save_ar_pfs);
9000 if (unwind)
9001 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9002 return 1;
9004 case AR_UNAT_REGNUM:
9005 gcc_assert (!current_frame_info.reg_save_ar_unat);
9006 if (unwind)
9007 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9008 return 1;
9010 case GR_REG (4):
9011 case GR_REG (5):
9012 case GR_REG (6):
9013 case GR_REG (7):
9014 if (unwind)
9015 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9016 1 << (src_regno - GR_REG (4)));
9017 return 1;
9019 case BR_REG (1):
9020 case BR_REG (2):
9021 case BR_REG (3):
9022 case BR_REG (4):
9023 case BR_REG (5):
9024 if (unwind)
9025 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9026 1 << (src_regno - BR_REG (1)));
9027 return 1;
9029 case FR_REG (2):
9030 case FR_REG (3):
9031 case FR_REG (4):
9032 case FR_REG (5):
9033 if (unwind)
9034 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9035 1 << (src_regno - FR_REG (2)));
9036 return 1;
9038 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9039 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9040 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9041 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9042 if (unwind)
9043 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9044 1 << (src_regno - FR_REG (12)));
9045 return 1;
9047 default:
9048 return 0;
9052 return 0;
9056 /* This function looks at a single insn and emits any directives
9057 required to unwind this insn. */
9058 void
9059 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9061 bool unwind = (flag_unwind_tables
9062 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9063 bool frame = dwarf2out_do_frame ();
9065 if (unwind || frame)
9067 rtx pat;
9069 if (GET_CODE (insn) == NOTE
9070 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
9072 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9074 /* Restore unwind state from immediately before the epilogue. */
9075 if (need_copy_state)
9077 if (unwind)
9079 fprintf (asm_out_file, "\t.body\n");
9080 fprintf (asm_out_file, "\t.copy_state %d\n",
9081 cfun->machine->state_num);
9083 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9084 ia64_dwarf2out_def_steady_cfa (insn);
9085 need_copy_state = false;
9089 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9090 return;
9092 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9093 if (pat)
9094 pat = XEXP (pat, 0);
9095 else
9096 pat = PATTERN (insn);
9098 switch (GET_CODE (pat))
9100 case SET:
9101 process_set (asm_out_file, pat, insn, unwind, frame);
9102 break;
9104 case PARALLEL:
9106 int par_index;
9107 int limit = XVECLEN (pat, 0);
9108 for (par_index = 0; par_index < limit; par_index++)
9110 rtx x = XVECEXP (pat, 0, par_index);
9111 if (GET_CODE (x) == SET)
9112 process_set (asm_out_file, x, insn, unwind, frame);
9114 break;
9117 default:
9118 gcc_unreachable ();
9124 enum ia64_builtins
9126 IA64_BUILTIN_BSP,
9127 IA64_BUILTIN_FLUSHRS
9130 void
9131 ia64_init_builtins (void)
9133 tree fpreg_type;
9134 tree float80_type;
9136 /* The __fpreg type. */
9137 fpreg_type = make_node (REAL_TYPE);
9138 TYPE_PRECISION (fpreg_type) = 82;
9139 layout_type (fpreg_type);
9140 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9142 /* The __float80 type. */
9143 float80_type = make_node (REAL_TYPE);
9144 TYPE_PRECISION (float80_type) = 80;
9145 layout_type (float80_type);
9146 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9148 /* The __float128 type. */
9149 if (!TARGET_HPUX)
9151 tree float128_type = make_node (REAL_TYPE);
9152 TYPE_PRECISION (float128_type) = 128;
9153 layout_type (float128_type);
9154 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9156 else
9157 /* Under HPUX, this is a synonym for "long double". */
9158 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9159 "__float128");
9161 #define def_builtin(name, type, code) \
9162 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9163 NULL, NULL_TREE)
9165 def_builtin ("__builtin_ia64_bsp",
9166 build_function_type (ptr_type_node, void_list_node),
9167 IA64_BUILTIN_BSP);
9169 def_builtin ("__builtin_ia64_flushrs",
9170 build_function_type (void_type_node, void_list_node),
9171 IA64_BUILTIN_FLUSHRS);
9173 #undef def_builtin
9175 if (TARGET_HPUX)
9177 if (built_in_decls [BUILT_IN_FINITE])
9178 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9179 "_Isfinite");
9180 if (built_in_decls [BUILT_IN_FINITEF])
9181 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9182 "_Isfinitef");
9183 if (built_in_decls [BUILT_IN_FINITEL])
9184 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9185 "_Isfinitef128");
9190 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9191 enum machine_mode mode ATTRIBUTE_UNUSED,
9192 int ignore ATTRIBUTE_UNUSED)
9194 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9195 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9197 switch (fcode)
9199 case IA64_BUILTIN_BSP:
9200 if (! target || ! register_operand (target, DImode))
9201 target = gen_reg_rtx (DImode);
9202 emit_insn (gen_bsp_value (target));
9203 #ifdef POINTERS_EXTEND_UNSIGNED
9204 target = convert_memory_address (ptr_mode, target);
9205 #endif
9206 return target;
9208 case IA64_BUILTIN_FLUSHRS:
9209 emit_insn (gen_flushrs ());
9210 return const0_rtx;
9212 default:
9213 break;
9216 return NULL_RTX;
9219 /* For HP-UX IA64, aggregate parameters are passed in the
9220 most significant bits of the stack slot. */
9222 enum direction
9223 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
9225 /* Exception to normal case for structures/unions/etc. */
9227 if (type && AGGREGATE_TYPE_P (type)
9228 && int_size_in_bytes (type) < UNITS_PER_WORD)
9229 return upward;
9231 /* Fall back to the default. */
9232 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9235 /* Emit text to declare externally defined variables and functions, because
9236 the Intel assembler does not support undefined externals. */
9238 void
9239 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9241 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9242 set in order to avoid putting out names that are never really
9243 used. */
9244 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9246 /* maybe_assemble_visibility will return 1 if the assembler
9247 visibility directive is output. */
9248 int need_visibility = ((*targetm.binds_local_p) (decl)
9249 && maybe_assemble_visibility (decl));
9251 /* GNU as does not need anything here, but the HP linker does
9252 need something for external functions. */
9253 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9254 && TREE_CODE (decl) == FUNCTION_DECL)
9255 (*targetm.asm_out.globalize_decl_name) (file, decl);
9256 else if (need_visibility && !TARGET_GNU_AS)
9257 (*targetm.asm_out.globalize_label) (file, name);
9261 /* Set the SImode div/mod functions, since init_integral_libfuncs only
9262 initializes modes of word_mode and larger.  Rename the TFmode libfuncs
9263 using the HPUX conventions.  __divtf3 is used for XFmode.  We need to
9264 keep it for backward compatibility.  */
9266 static void
9267 ia64_init_libfuncs (void)
9269 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9270 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9271 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9272 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9274 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9275 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9276 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9277 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9278 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9280 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9281 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9282 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9283 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9284 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9285 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9287 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9288 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9289 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9290 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9291 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9293 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9294 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9295 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9296 /* HP-UX 11.23 libc does not have a function for unsigned
9297 SImode-to-TFmode conversion. */
9298 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
9301 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9303 static void
9304 ia64_hpux_init_libfuncs (void)
9306 ia64_init_libfuncs ();
9308 /* The HP SI millicode division and mod functions expect DI arguments.
9309 By turning them off completely we avoid using both libgcc and the
9310 non-standard millicode routines and use the HP DI millicode routines
9311 instead. */
9313 set_optab_libfunc (sdiv_optab, SImode, 0);
9314 set_optab_libfunc (udiv_optab, SImode, 0);
9315 set_optab_libfunc (smod_optab, SImode, 0);
9316 set_optab_libfunc (umod_optab, SImode, 0);
9318 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9319 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9320 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9321 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
9323 /* HP-UX libc has TF min/max/abs routines in it. */
9324 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
9325 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
9326 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
9328 /* ia64_expand_compare uses this. */
9329 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
9331 /* These should never be used. */
9332 set_optab_libfunc (eq_optab, TFmode, 0);
9333 set_optab_libfunc (ne_optab, TFmode, 0);
9334 set_optab_libfunc (gt_optab, TFmode, 0);
9335 set_optab_libfunc (ge_optab, TFmode, 0);
9336 set_optab_libfunc (lt_optab, TFmode, 0);
9337 set_optab_libfunc (le_optab, TFmode, 0);
9340 /* Rename the division and modulus functions in VMS. */
9342 static void
9343 ia64_vms_init_libfuncs (void)
9345 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9346 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9347 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9348 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9349 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9350 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9351 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9352 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9355 /* Rename the TFmode libfuncs available from soft-fp in glibc using
9356 the HPUX conventions. */
9358 static void
9359 ia64_sysv4_init_libfuncs (void)
9361 ia64_init_libfuncs ();
9363 /* These functions are not part of the HPUX TFmode interface. We
9364 use them instead of _U_Qfcmp, which doesn't work the way we
9365 expect. */
9366 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
9367 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
9368 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
9369 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
9370 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
9371 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
9373 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
9374 glibc doesn't have them. */
9377 /* Return the section to use for X. The only special thing we do here
9378 is to honor small data. */
9380 static section *
9381 ia64_select_rtx_section (enum machine_mode mode, rtx x,
9382 unsigned HOST_WIDE_INT align)
9384 if (GET_MODE_SIZE (mode) > 0
9385 && GET_MODE_SIZE (mode) <= ia64_section_threshold
9386 && !TARGET_NO_SDATA)
9387 return sdata_section;
9388 else
9389 return default_elf_select_rtx_section (mode, x, align);
9392 /* It is illegal to have relocations in shared segments on AIX and HPUX.
9393 Pretend flag_pic is always set. */
9395 static section *
9396 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
9398 return default_elf_select_section_1 (exp, reloc, align, true);
9401 static void
9402 ia64_rwreloc_unique_section (tree decl, int reloc)
9404 default_unique_section_1 (decl, reloc, true);
9407 static section *
9408 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
9409 unsigned HOST_WIDE_INT align)
9411 section *sect;
9412 int save_pic = flag_pic;
9413 flag_pic = 1;
9414 sect = ia64_select_rtx_section (mode, x, align);
9415 flag_pic = save_pic;
9416 return sect;
9419 #ifndef TARGET_RWRELOC
9420 #define TARGET_RWRELOC flag_pic
9421 #endif
9423 static unsigned int
9424 ia64_section_type_flags (tree decl, const char *name, int reloc)
9426 unsigned int flags = 0;
9428 if (strcmp (name, ".sdata") == 0
9429 || strncmp (name, ".sdata.", 7) == 0
9430 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9431 || strncmp (name, ".sdata2.", 8) == 0
9432 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
9433 || strcmp (name, ".sbss") == 0
9434 || strncmp (name, ".sbss.", 6) == 0
9435 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9436 flags = SECTION_SMALL;
9438 flags |= default_section_type_flags_1 (decl, name, reloc, TARGET_RWRELOC);
9439 return flags;
9442 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
9443 structure type and that the address of that type should be passed
9444 in out0, rather than in r8. */
9446 static bool
9447 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
9449 tree ret_type = TREE_TYPE (fntype);
9451 /* The Itanium C++ ABI requires that out0, rather than r8, be used
9452 as the structure return address parameter, if the return value
9453 type has a non-trivial copy constructor or destructor. It is not
9454 clear if this same convention should be used for other
9455 programming languages. Until G++ 3.4, we incorrectly used r8 for
9456 these return values. */
9457 return (abi_version_at_least (2)
9458 && ret_type
9459 && TYPE_MODE (ret_type) == BLKmode
9460 && TREE_ADDRESSABLE (ret_type)
9461 && strcmp (lang_hooks.name, "GNU C++") == 0);
9464 /* Output the assembler code for a thunk function. THUNK_DECL is the
9465 declaration for the thunk function itself, FUNCTION is the decl for
9466 the target function. DELTA is an immediate constant offset to be
9467 added to THIS. If VCALL_OFFSET is nonzero, the word at
9468 *(*this + vcall_offset) should be added to THIS. */
9470 static void
9471 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9472 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9473 tree function)
9475 rtx this, insn, funexp;
9476 unsigned int this_parmno;
9477 unsigned int this_regno;
9479 reload_completed = 1;
9480 epilogue_completed = 1;
9481 no_new_pseudos = 1;
9482 reset_block_changes ();
9484 /* Set things up as ia64_expand_prologue might. */
9485 last_scratch_gr_reg = 15;
9487 memset (&current_frame_info, 0, sizeof (current_frame_info));
9488 current_frame_info.spill_cfa_off = -16;
9489 current_frame_info.n_input_regs = 1;
9490 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
9492 /* Mark the end of the (empty) prologue. */
9493 emit_note (NOTE_INSN_PROLOGUE_END);
9495 /* Figure out whether "this" will be the first parameter (the
9496 typical case) or the second parameter (as happens when the
9497 virtual function returns certain class objects). */
9498 this_parmno
9499 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
9500 ? 1 : 0);
9501 this_regno = IN_REG (this_parmno);
9502 if (!TARGET_REG_NAMES)
9503 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
9505 this = gen_rtx_REG (Pmode, this_regno);
9506 if (TARGET_ILP32)
9508 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
9509 REG_POINTER (tmp) = 1;
9510 if (delta && CONST_OK_FOR_I (delta))
9512 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
9513 delta = 0;
9515 else
9516 emit_insn (gen_ptr_extend (this, tmp));
9519 /* Apply the constant offset, if required. */
9520 if (delta)
9522 rtx delta_rtx = GEN_INT (delta);
9524 if (!CONST_OK_FOR_I (delta))
9526 rtx tmp = gen_rtx_REG (Pmode, 2);
9527 emit_move_insn (tmp, delta_rtx);
9528 delta_rtx = tmp;
9530 emit_insn (gen_adddi3 (this, this, delta_rtx));
9533 /* Apply the offset from the vtable, if required. */
9534 if (vcall_offset)
9536 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9537 rtx tmp = gen_rtx_REG (Pmode, 2);
9539 if (TARGET_ILP32)
9541 rtx t = gen_rtx_REG (ptr_mode, 2);
9542 REG_POINTER (t) = 1;
9543 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
9544 if (CONST_OK_FOR_I (vcall_offset))
9546 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
9547 vcall_offset_rtx));
9548 vcall_offset = 0;
9550 else
9551 emit_insn (gen_ptr_extend (tmp, t));
9553 else
9554 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
9556 if (vcall_offset)
9558 if (!CONST_OK_FOR_J (vcall_offset))
9560 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9561 emit_move_insn (tmp2, vcall_offset_rtx);
9562 vcall_offset_rtx = tmp2;
9564 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9567 if (TARGET_ILP32)
9568 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
9569 gen_rtx_MEM (ptr_mode, tmp));
9570 else
9571 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9573 emit_insn (gen_adddi3 (this, this, tmp));
9576 /* Generate a tail call to the target function. */
9577 if (! TREE_USED (function))
9579 assemble_external (function);
9580 TREE_USED (function) = 1;
9582 funexp = XEXP (DECL_RTL (function), 0);
9583 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9584 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
9585 insn = get_last_insn ();
9586 SIBLING_CALL_P (insn) = 1;
9588 /* Code generation for calls relies on splitting. */
9589 reload_completed = 1;
9590 epilogue_completed = 1;
9591 try_split (PATTERN (insn), insn, 0);
9593 emit_barrier ();
9595 /* Run just enough of rest_of_compilation to get the insns emitted.
9596 There's not really enough bulk here to make other passes such as
9597 instruction scheduling worthwhile. Note that use_thunk calls
9598 assemble_start_function and assemble_end_function. */
9600 insn_locators_initialize ();
9601 emit_all_insn_group_barriers (NULL);
9602 insn = get_insns ();
9603 shorten_branches (insn);
9604 final_start_function (insn, file, 1);
9605 final (insn, file, 1);
9606 final_end_function ();
9608 reload_completed = 0;
9609 epilogue_completed = 0;
9610 no_new_pseudos = 0;
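/* Editor's sketch, not part of ia64.c: the pointer adjustment the thunk
   above emits, restated as plain C.  The helper name is hypothetical; it
   only mirrors the DELTA / VCALL_OFFSET arithmetic documented before
   ia64_output_mi_thunk.  */
#if 0 /* illustrative example only */
static void *
thunk_adjust_this (void *this_ptr, long delta, long vcall_offset)
{
  char *p = (char *) this_ptr + delta;	/* apply the constant DELTA first */
  if (vcall_offset)
    /* *this points at the vtable; the word at VCALL_OFFSET in the vtable
       holds a further displacement to add to THIS.  */
    p += *(long *) (*(char **) p + vcall_offset);
  return p;				/* then tail-call FUNCTION */
}
#endif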
9613 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9615 static rtx
9616 ia64_struct_value_rtx (tree fntype,
9617 int incoming ATTRIBUTE_UNUSED)
9619 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
9620 return NULL_RTX;
9621 return gen_rtx_REG (Pmode, GR_REG (8));
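/* Editor's illustrative note, not part of ia64.c: per the two functions
   above, for a G++ return type with a non-trivial copy constructor or
   destructor, e.g.
     struct S { S (const S &); ~S (); };  S f ();
   the address of the return slot is passed in out0 (the first parameter)
   when -fabi-version is at least 2, whereas a plain BLKmode aggregate
   return still uses r8.  */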
9624 static bool
9625 ia64_scalar_mode_supported_p (enum machine_mode mode)
9627 switch (mode)
9629 case QImode:
9630 case HImode:
9631 case SImode:
9632 case DImode:
9633 case TImode:
9634 return true;
9636 case SFmode:
9637 case DFmode:
9638 case XFmode:
9639 case RFmode:
9640 return true;
9642 case TFmode:
9643 return TARGET_HPUX;
9645 default:
9646 return false;
9650 static bool
9651 ia64_vector_mode_supported_p (enum machine_mode mode)
9653 switch (mode)
9655 case V8QImode:
9656 case V4HImode:
9657 case V2SImode:
9658 return true;
9660 case V2SFmode:
9661 return true;
9663 default:
9664 return false;
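/* Editor's sketch, not part of ia64.c: GNU C vector types corresponding to
   the modes accepted above (the typedef names are made up).  */
#if 0 /* illustrative example only */
typedef char  v8qi __attribute__ ((vector_size (8)));	/* V8QImode */
typedef short v4hi __attribute__ ((vector_size (8)));	/* V4HImode */
typedef int   v2si __attribute__ ((vector_size (8)));	/* V2SImode */
typedef float v2sf __attribute__ ((vector_size (8)));	/* V2SFmode */
#endif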
9668 /* Implement the FUNCTION_PROFILER macro. */
9670 void
9671 ia64_output_function_profiler (FILE *file, int labelno)
9673 bool indirect_call;
9675 /* If the function needs a static chain and the static chain
9676 register is r15, we use an indirect call so as to bypass
9677 the PLT stub in case the executable is dynamically linked,
9678 because the stub clobbers r15 as per 5.3.6 of the psABI.
9679 We don't need to do that in non-canonical PIC mode.  */
9681 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
9683 gcc_assert (STATIC_CHAIN_REGNUM == 15);
9684 indirect_call = true;
9686 else
9687 indirect_call = false;
9689 if (TARGET_GNU_AS)
9690 fputs ("\t.prologue 4, r40\n", file);
9691 else
9692 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
9693 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
9695 if (NO_PROFILE_COUNTERS)
9696 fputs ("\tmov out3 = r0\n", file);
9697 else
9699 char buf[20];
9700 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9702 if (TARGET_AUTO_PIC)
9703 fputs ("\tmovl out3 = @gprel(", file);
9704 else
9705 fputs ("\taddl out3 = @ltoff(", file);
9706 assemble_name (file, buf);
9707 if (TARGET_AUTO_PIC)
9708 fputs (")\n", file);
9709 else
9710 fputs ("), r1\n", file);
9713 if (indirect_call)
9714 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
9715 fputs ("\t;;\n", file);
9717 fputs ("\t.save rp, r42\n", file);
9718 fputs ("\tmov out2 = b0\n", file);
9719 if (indirect_call)
9720 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
9721 fputs ("\t.body\n", file);
9722 fputs ("\tmov out1 = r1\n", file);
9723 if (indirect_call)
9725 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
9726 fputs ("\tmov b6 = r16\n", file);
9727 fputs ("\tld8 r1 = [r14]\n", file);
9728 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
9730 else
9731 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
9734 static GTY(()) rtx mcount_func_rtx;
9735 static rtx
9736 gen_mcount_func_rtx (void)
9738 if (!mcount_func_rtx)
9739 mcount_func_rtx = init_one_libfunc ("_mcount");
9740 return mcount_func_rtx;
9743 void
9744 ia64_profile_hook (int labelno)
9746 rtx label, ip;
9748 if (NO_PROFILE_COUNTERS)
9749 label = const0_rtx;
9750 else
9752 char buf[30];
9753 const char *label_name;
9754 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9755 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
9756 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
9757 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
9759 ip = gen_reg_rtx (Pmode);
9760 emit_insn (gen_ip_value (ip));
9761 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
9762 VOIDmode, 3,
9763 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
9764 ip, Pmode,
9765 label, Pmode);
9768 /* Return the mangling of TYPE if it is an extended fundamental type. */
9770 static const char *
9771 ia64_mangle_fundamental_type (tree type)
9773 /* On HP-UX, "long double" is TFmode and is mangled as "e", so __float128
9774 (the same mode there) is also mangled as "e"; elsewhere TFmode is "g". */
9775 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9776 return "g";
9777 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9778 an extended mangling. Elsewhere, "e" is available since long
9779 double is 80 bits. */
9780 if (TYPE_MODE (type) == XFmode)
9781 return TARGET_HPUX ? "u9__float80" : "e";
9782 if (TYPE_MODE (type) == RFmode)
9783 return "u7__fpreg";
9784 return NULL;
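/* Editor's illustrative note, not part of ia64.c: manglings implied by the
   returns above, shown for a hypothetical 'void f (T)':
     T = __float128 (TFmode), not HP-UX:  _Z1fg
     T = __float80  (XFmode), not HP-UX:  _Z1fe
     T = __float80  (XFmode), HP-UX:      _Z1fu9__float80
     T = __fpreg    (RFmode):             _Z1fu7__fpreg  */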
9787 /* Return the diagnostic message string if conversion from FROMTYPE to
9788 TOTYPE is not allowed, NULL otherwise. */
9789 static const char *
9790 ia64_invalid_conversion (tree fromtype, tree totype)
9792 /* Reject nontrivial conversion to or from __fpreg. */
9793 if (TYPE_MODE (fromtype) == RFmode
9794 && TYPE_MODE (totype) != RFmode
9795 && TYPE_MODE (totype) != VOIDmode)
9796 return N_("invalid conversion from %<__fpreg%>");
9797 if (TYPE_MODE (totype) == RFmode
9798 && TYPE_MODE (fromtype) != RFmode)
9799 return N_("invalid conversion to %<__fpreg%>");
9800 return NULL;
9803 /* Return the diagnostic message string if the unary operation OP is
9804 not permitted on TYPE, NULL otherwise. */
9805 static const char *
9806 ia64_invalid_unary_op (int op, tree type)
9808 /* Reject operations on __fpreg other than unary + or &. */
9809 if (TYPE_MODE (type) == RFmode
9810 && op != CONVERT_EXPR
9811 && op != ADDR_EXPR)
9812 return N_("invalid operation on %<__fpreg%>");
9813 return NULL;
9816 /* Return the diagnostic message string if the binary operation OP is
9817 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9818 static const char *
9819 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
9821 /* Reject operations on __fpreg. */
9822 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9823 return N_("invalid operation on %<__fpreg%>");
9824 return NULL;
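/* Editor's sketch, not part of ia64.c: user-level code the three checks
   above are expected to accept or reject.  */
#if 0 /* illustrative example only */
__fpreg r;
double d;
void
fpreg_examples (void)
{
  d = r;		/* rejected: invalid conversion from '__fpreg' */
  r = 1.0;		/* rejected: invalid conversion to '__fpreg' */
  (void) &r;		/* accepted: unary '&' is allowed */
  r = r + r;		/* rejected: invalid operation on '__fpreg' */
}
#endif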
9827 /* Implement overriding of the optimization options. */
9828 void
9829 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
9830 int size ATTRIBUTE_UNUSED)
9832 /* Let the scheduler form additional regions. */
9833 set_param_value ("max-sched-extend-regions-iters", 2);
9835 /* Set the default values for cache-related parameters. */
9836 set_param_value ("simultaneous-prefetches", 6);
9837 set_param_value ("l1-cache-line-size", 32);
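/* Editor's illustrative note, not part of ia64.c: the defaults above are
   the same values a user could request with
     --param max-sched-extend-regions-iters=2
     --param simultaneous-prefetches=6
     --param l1-cache-line-size=32
   on the command line.  */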
9841 /* HP-UX version_id attribute.
9842 For object foo, if the version_id is set to 1234, put out an alias
9843 of '.alias foo "foo{1234}"'.  We can't use "foo{1234}" in anything
9844 other than an alias statement because it is an illegal symbol name. */
9846 static tree
9847 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
9848 tree name ATTRIBUTE_UNUSED,
9849 tree args,
9850 int flags ATTRIBUTE_UNUSED,
9851 bool *no_add_attrs)
9853 tree arg = TREE_VALUE (args);
9855 if (TREE_CODE (arg) != STRING_CST)
9857 error ("version attribute is not a string");
9858 *no_add_attrs = true;
9859 return NULL_TREE;
9861 return NULL_TREE;
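/* Editor's sketch, not part of ia64.c: hypothetical use of the HP-UX
   version_id attribute handled above.  */
#if 0 /* illustrative example only */
extern int foo (int) __attribute__ ((version_id ("1234")));
/* expected to emit an alias directive of the form
	.alias foo "foo{1234}"
   in the assembler output.  */
#endif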
9864 #include "gt-ia64.h"