/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999-2018 Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
		  David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "sched-int.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "tm-constrs.h"
#include "sel-sched.h"

/* This file should be included last.  */
#include "target-def.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;
/* Register names for ia64_expand_prologue.  */
static const char * const ia64_reg_numbers[96] =
{ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
  "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
  "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
  "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
  "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
  "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
  "r104","r105","r106","r107","r108","r109","r110","r111",
  "r112","r113","r114","r115","r116","r117","r118","r119",
  "r120","r121","r122","r123","r124","r125","r126","r127"};
/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_input_reg_names[8] =
{ "in0",  "in1",  "in2",  "in3",  "in4",  "in5",  "in6",  "in7" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
  "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
  "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
  "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
  "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
  "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
  "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
  "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
  "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
  "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */
unsigned int ia64_section_threshold;

/* The following variable is used by the DFA insn scheduler.  The value is
   TRUE if we do insn bundling instead of insn scheduling.  */
int bundling_p = 0;

enum ia64_frame_regs
{
   reg_fp,
   reg_save_b0,
   reg_save_pr,
   reg_save_ar_pfs,
   reg_save_ar_unat,
   reg_save_ar_lc,
   reg_save_gp,
   number_of_ia64_frame_regs
};
/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;	/* size of the stack frame, not including
				   the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;	/* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;	/* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size;  /* size of spill area for others.  */
  HARD_REG_SET mask;		/* mask of saved registers.  */
  unsigned int gr_used_mask;	/* mask of registers in use as gr spill
				   registers or long-term scratches.  */
  int n_spilled;		/* number of spilled registers.  */
  int r[number_of_ia64_frame_regs];  /* Frame related registers.  */
  int n_input_regs;		/* number of input registers used.  */
  int n_local_regs;		/* number of local registers used.  */
  int n_output_regs;		/* number of output registers used.  */
  int n_rotate_regs;		/* number of rotating registers used.  */
  char need_regstk;		/* true if a .regstk directive needed.  */
  char initialized;		/* true if the data is finalized.  */
};

/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;
/* The actual registers that are emitted.  */
static int emitted_frame_related_regs[number_of_ia64_frame_regs];
static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
static int ia64_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *);
static void ia64_h_i_d_extended (void);
static void * ia64_alloc_sched_context (void);
static void ia64_init_sched_context (void *, bool);
static void ia64_set_sched_context (void *);
static void ia64_clear_sched_context (void *);
static void ia64_free_sched_context (void *);
static int ia64_mode_to_int (machine_mode);
static void ia64_set_sched_flags (spec_info_t);
static ds_t ia64_get_insn_spec_ds (rtx_insn *);
static ds_t ia64_get_insn_checked_ds (rtx_insn *);
static bool ia64_skip_rtx_p (const_rtx);
static int ia64_speculate_insn (rtx_insn *, ds_t, rtx *);
static bool ia64_needs_block_p (ds_t);
static rtx ia64_gen_spec_check (rtx_insn *, rtx_insn *, ds_t);
static int ia64_spec_check_p (rtx);
static int ia64_spec_check_src_p (rtx);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static int find_gr_spill (enum ia64_frame_regs, int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);

static void ia64_option_override (void);
static bool ia64_can_eliminate (const int, const int);
static machine_mode hfa_element_mode (const_tree, bool);
static void ia64_setup_incoming_varargs (cumulative_args_t, machine_mode,
					 tree, int *, int);
static int ia64_arg_partial_bytes (cumulative_args_t, machine_mode,
				   tree, bool);
static rtx ia64_function_arg_1 (cumulative_args_t, machine_mode,
				const_tree, bool, bool);
static rtx ia64_function_arg (cumulative_args_t, machine_mode,
			      const_tree, bool);
static rtx ia64_function_incoming_arg (cumulative_args_t,
				       machine_mode, const_tree, bool);
static void ia64_function_arg_advance (cumulative_args_t, machine_mode,
				       const_tree, bool);
static pad_direction ia64_function_arg_padding (machine_mode, const_tree);
static unsigned int ia64_function_arg_boundary (machine_mode,
						const_tree);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (const_tree, const_tree);
static rtx ia64_function_value (const_tree, const_tree, bool);
static rtx ia64_libcall_value (machine_mode, const_rtx);
static bool ia64_function_value_regno_p (const unsigned int);
static int ia64_register_move_cost (machine_mode, reg_class_t,
				    reg_class_t);
static int ia64_memory_move_cost (machine_mode mode, reg_class_t,
				  bool);
static bool ia64_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int ia64_unspec_may_trap_p (const_rtx, unsigned);
static void fix_range (const char *);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (const_tree);
static void process_epilogue (FILE *, rtx, bool, bool);

static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *);
static void ia64_output_function_epilogue (FILE *);
static void ia64_output_function_end_prologue (FILE *);

static void ia64_print_operand (FILE *, rtx, int);
static void ia64_print_operand_address (FILE *, machine_mode, rtx);
static bool ia64_print_operand_punct_valid_p (unsigned char code);

static int ia64_issue_rate (void);
static int ia64_adjust_cost (rtx_insn *, int, rtx_insn *, int, dw_t);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_init_global (FILE *, int, int);
static void ia64_sched_finish_global (FILE *, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
static int ia64_variable_issue (FILE *, int, rtx_insn *, int);

static void ia64_asm_unwind_emit (FILE *, rtx_insn *);
static void ia64_asm_emit_except_personality (rtx);
static void ia64_asm_init_sections (void);

static enum unwind_info_type ia64_debug_unwind_info (void);

static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx_insn *,
				 int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);

static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
static bool important_for_bundling_p (rtx_insn *);
static bool unknown_for_bundling_p (rtx_insn *);
static void bundling (FILE *, int, rtx_insn *, rtx_insn *);

static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static void ia64_file_start (void);
static void ia64_globalize_decl_name (FILE *, tree);

static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
static section *ia64_select_rtx_section (machine_mode, rtx,
					 unsigned HOST_WIDE_INT);
static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_section_type_flags (tree, const char *, int);
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_soft_fp_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static bool ia64_vms_valid_pointer_mode (scalar_int_mode mode)
     ATTRIBUTE_UNUSED;
static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
     ATTRIBUTE_UNUSED;

static bool ia64_attribute_takes_identifier_p (const_tree);
static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool ia64_scalar_mode_supported_p (scalar_mode mode);
static bool ia64_vector_mode_supported_p (machine_mode mode);
static bool ia64_legitimate_constant_p (machine_mode, rtx);
static bool ia64_legitimate_address_p (machine_mode, rtx, bool);
static bool ia64_cannot_force_const_mem (machine_mode, rtx);
static const char *ia64_mangle_type (const_tree);
static const char *ia64_invalid_conversion (const_tree, const_tree);
static const char *ia64_invalid_unary_op (int, const_tree);
static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
static machine_mode ia64_c_mode_for_suffix (char);
static void ia64_trampoline_init (rtx, tree, rtx);
static void ia64_override_options_after_change (void);
static bool ia64_member_type_forces_blk (const_tree, machine_mode);

static tree ia64_fold_builtin (tree, int, tree *, bool);
static tree ia64_builtin_decl (unsigned, bool);

static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
static fixed_size_mode ia64_get_reg_raw_mode (int regno);
static section * ia64_hpux_function_section (tree, enum node_frequency,
					     bool, bool);

static bool ia64_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
					   const vec_perm_indices &);

static unsigned int ia64_hard_regno_nregs (unsigned int, machine_mode);
static bool ia64_hard_regno_mode_ok (unsigned int, machine_mode);
static bool ia64_modes_tieable_p (machine_mode, machine_mode);
static bool ia64_can_change_mode_class (machine_mode, machine_mode,
					reg_class_t);
#define MAX_VECT_LEN 8

struct expand_vec_perm_d
{
  rtx target, op0, op1;
  unsigned char perm[MAX_VECT_LEN];
  machine_mode vmode;
  unsigned char nelt;
  bool one_operand_p;
  bool testing_p;
};

static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
/* Table of valid machine attributes.  */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "syscall_linkage", 0, 0, false, true, true, false, NULL, NULL },
  { "model",	       1, 1, true, false, false, false,
    ia64_handle_model_attribute, NULL },
#if TARGET_ABI_OPEN_VMS
  { "common_object",   1, 1, true, false, false, false,
    ia64_vms_common_object_attribute, NULL },
#endif
  { "version_id",      1, 1, true, false, false, false,
    ia64_handle_version_id_attribute, NULL },
  { NULL,	       0, 0, false, false, false, false, NULL, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN ia64_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL ia64_builtin_decl

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
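/* The IA-64 assembler spells the integer data directives "dataN" rather
   than ".byte"/".2byte"/etc., and the ".ua" forms are the unaligned
   variants; e.g. with TARGET_ASM_ALIGNED_SI_OP above, GCC emits
   "data4  <value>" for an aligned 32-bit integer.  */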
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE ia64_option_override

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND ia64_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
  ia64_first_cycle_multipass_dfa_lookahead_guard

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle

#undef TARGET_SCHED_H_I_D_EXTENDED
#define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context

#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context

#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context

#undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
#define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context

#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context

#undef TARGET_SCHED_SET_SCHED_FLAGS
#define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags

#undef TARGET_SCHED_GET_INSN_SPEC_DS
#define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds

#undef TARGET_SCHED_GET_INSN_CHECKED_DS
#define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds

#undef TARGET_SCHED_SPECULATE_INSN
#define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn

#undef TARGET_SCHED_NEEDS_BLOCK_P
#define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p

#undef TARGET_SCHED_GEN_SPEC_CHECK
#define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check

#undef TARGET_SCHED_SKIP_RTX_P
#define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG ia64_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING ia64_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start

#undef TARGET_ASM_GLOBALIZE_DECL_NAME
#define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_UNSPEC_MAY_TRAP_P
#define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info

#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
#endif

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ia64_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE ia64_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_GET_RAW_RESULT_MODE
#define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
#undef TARGET_GET_RAW_ARG_MODE
#define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK ia64_member_type_forces_blk

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg

#undef TARGET_ASM_UNWIND_EMIT
#define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
#undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
#define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
#undef TARGET_ASM_INIT_SECTIONS
#define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections

#undef TARGET_DEBUG_UNWIND_INFO
#define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ia64_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ia64_mangle_type

#undef TARGET_INVALID_CONVERSION
#define TARGET_INVALID_CONVERSION ia64_invalid_conversion
#undef TARGET_INVALID_UNARY_OP
#define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE ia64_can_eliminate

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ia64_trampoline_init

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class

#undef TARGET_DELAY_SCHED2
#define TARGET_DELAY_SCHED2 true

/* Variable tracking should be run after all optimizations which
   change order of insns.  It also needs a valid CFG.  */
#undef TARGET_DELAY_VARTRACK
#define TARGET_DELAY_VARTRACK true

#undef TARGET_VECTORIZE_VEC_PERM_CONST
#define TARGET_VECTORIZE_VEC_PERM_CONST ia64_vectorize_vec_perm_const

#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P ia64_attribute_takes_identifier_p

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 0

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS ia64_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK ia64_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P ia64_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS ia64_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

struct gcc_target targetm = TARGET_INITIALIZER;
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
ia64_attribute_takes_identifier_p (const_tree attr_id)
{
  if (is_attribute_p ("model", attr_id))
    return true;
#if TARGET_ABI_OPEN_VMS
  if (is_attribute_p ("common_object", attr_id))
    return true;
#endif
  return false;
}

typedef enum
  {
    ADDR_AREA_NORMAL,	/* normal address area */
    ADDR_AREA_SMALL	/* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
	return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}
static tree
ia64_handle_model_attribute (tree *node, tree name, tree args,
			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  ia64_addr_area addr_area = ADDR_AREA_NORMAL;
  ia64_addr_area area;
  tree arg, decl = *node;

  init_idents ();
  arg = TREE_VALUE (args);
  if (arg == small_ident1 || arg == small_ident2)
    {
      addr_area = ADDR_AREA_SMALL;
    }
  else
    {
      warning (OPT_Wattributes, "invalid argument of %qE attribute",
	       name);
      *no_add_attrs = true;
    }

  switch (TREE_CODE (decl))
    {
    case VAR_DECL:
      if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
	   == FUNCTION_DECL)
	  && !TREE_STATIC (decl))
	{
	  error_at (DECL_SOURCE_LOCATION (decl),
		    "an address area attribute cannot be specified for "
		    "local variables");
	  *no_add_attrs = true;
	}
      area = ia64_get_addr_area (decl);
      if (area != ADDR_AREA_NORMAL && addr_area != area)
	{
	  error ("address area of %q+D conflicts with previous "
		 "declaration", decl);
	  *no_add_attrs = true;
	}
      break;

    case FUNCTION_DECL:
      error_at (DECL_SOURCE_LOCATION (decl),
		"address area attribute cannot be specified for "
		"functions");
      *no_add_attrs = true;
      break;

    default:
      warning (OPT_Wattributes, "%qE attribute ignored",
	       name);
      *no_add_attrs = true;
      break;
    }

  return NULL_TREE;
}
/* Part of the low level implementation of DEC Ada pragma Common_Object which
   enables the shared use of variables stored in overlaid linker areas
   corresponding to the use of Fortran COMMON.  */

static tree
ia64_vms_common_object_attribute (tree *node, tree name, tree args,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  tree decl = *node;
  tree id;

  gcc_assert (DECL_P (decl));

  DECL_COMMON (decl) = 1;
  id = TREE_VALUE (args);
  if (TREE_CODE (id) != IDENTIFIER_NODE && TREE_CODE (id) != STRING_CST)
    {
      error ("%qE attribute requires a string constant argument", name);
      *no_add_attrs = true;
      return NULL_TREE;
    }
  return NULL_TREE;
}
/* Part of the low level implementation of DEC Ada pragma Common_Object.  */

void
ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
				     unsigned HOST_WIDE_INT size,
				     unsigned int align)
{
  tree attr = DECL_ATTRIBUTES (decl);

  if (attr)
    attr = lookup_attribute ("common_object", attr);
  if (attr)
    {
      tree id = TREE_VALUE (TREE_VALUE (attr));

      if (TREE_CODE (id) == IDENTIFIER_NODE)
	name = IDENTIFIER_POINTER (id);
      else if (TREE_CODE (id) == STRING_CST)
	name = TREE_STRING_POINTER (id);
      else
	abort ();

      fprintf (file, "\t.vms_common\t\"%s\",", name);
    }
  else
    fprintf (file, "%s", COMMON_ASM_OP);

  /*  Code from elfos.h.  */
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED",%u",
	   size, align / BITS_PER_UNIT);

  fputs ("\n", file);
}
static void
ia64_encode_addr_area (tree decl, rtx symbol)
{
  int flags;

  flags = SYMBOL_REF_FLAGS (symbol);
  switch (ia64_get_addr_area (decl))
    {
    case ADDR_AREA_NORMAL: break;
    case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
    default: gcc_unreachable ();
    }
  SYMBOL_REF_FLAGS (symbol) = flags;
}

static void
ia64_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) == VAR_DECL
      && GET_CODE (DECL_RTL (decl)) == MEM
      && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
    ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (rtx dst, rtx src)
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */
  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return satisfies_constraint_G (src);
}
/* Return 1 if the operands are ok for a floating point load pair.  */

int
ia64_load_pair_ok (rtx dst, rtx src)
{
  /* ??? There is a thinko in the implementation of the "x" constraint and the
     FP_REGS class.  The constraint will also reject (reg f30:TI) so we must
     also return false for it.  */
  if (GET_CODE (dst) != REG
      || !(FP_REGNO_P (REGNO (dst)) && FP_REGNO_P (REGNO (dst) + 1)))
    return 0;
  if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
    return 0;
  switch (GET_CODE (XEXP (src, 0)))
    {
    case REG:
    case POST_INC:
      break;
    case POST_DEC:
      return 0;
    case POST_MODIFY:
      {
	rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);

	if (GET_CODE (adjust) != CONST_INT
	    || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
	  return 0;
      }
      break;
    default:
      abort ();
    }
  return 1;
}
int
addp4_optimize_ok (rtx op1, rtx op2)
{
  return (basereg_operand (op1, GET_MODE(op1)) !=
	  basereg_operand (op2, GET_MODE(op2)));
}
/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */
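/* For example, with SHIFT = 8, OP = 0xff00 shifts down to 0xff and
   exact_log2 (0xff + 1) = 8, i.e. an 8-bit field.  OP = 0xfa00 shifts
   down to 0xfa, which is not a solid block of 1's, so exact_log2
   returns -1 and the operand is rejected.  */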
int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
/* Return the TLS model to use for ADDR.  */

static enum tls_model
tls_symbolic_operand_type (rtx addr)
{
  enum tls_model tls_kind = TLS_MODEL_NONE;

  if (GET_CODE (addr) == CONST)
    {
      if (GET_CODE (XEXP (addr, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
	tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
    }
  else if (GET_CODE (addr) == SYMBOL_REF)
    tls_kind = SYMBOL_REF_TLS_MODEL (addr);

  return tls_kind;
}
/* Returns true if REG (assumed to be a `reg' RTX) is valid for use
   as a base register.  */

static bool
ia64_reg_ok_for_base_p (const_rtx reg, bool strict)
{
  if (strict
      && REGNO_OK_FOR_BASE_P (REGNO (reg)))
    return true;
  else if (!strict
	   && (GENERAL_REGNO_P (REGNO (reg))
	       || !HARD_REGISTER_P (reg)))
    return true;
  else
    return false;
}

static bool
ia64_legitimate_address_reg (const_rtx reg, bool strict)
{
  if ((REG_P (reg) && ia64_reg_ok_for_base_p (reg, strict))
      || (GET_CODE (reg) == SUBREG && REG_P (XEXP (reg, 0))
	  && ia64_reg_ok_for_base_p (XEXP (reg, 0), strict)))
    return true;

  return false;
}
static bool
ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
{
  if (GET_CODE (disp) == PLUS
      && rtx_equal_p (reg, XEXP (disp, 0))
      && (ia64_legitimate_address_reg (XEXP (disp, 1), strict)
	  || (CONST_INT_P (XEXP (disp, 1))
	      && IN_RANGE (INTVAL (XEXP (disp, 1)), -256, 255))))
    return true;

  return false;
}
/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */

static bool
ia64_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
			   rtx x, bool strict)
{
  if (ia64_legitimate_address_reg (x, strict))
    return true;
  else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC)
	   && ia64_legitimate_address_reg (XEXP (x, 0), strict)
	   && XEXP (x, 0) != arg_pointer_rtx)
    return true;
  else if (GET_CODE (x) == POST_MODIFY
	   && ia64_legitimate_address_reg (XEXP (x, 0), strict)
	   && XEXP (x, 0) != arg_pointer_rtx
	   && ia64_legitimate_address_disp (XEXP (x, 0), XEXP (x, 1), strict))
    return true;
  else
    return false;
}
/* Return true if X is a constant that is valid for some immediate
   field in an instruction.  */

static bool
ia64_legitimate_constant_p (machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case LABEL_REF:
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
	return true;
      return satisfies_constraint_G (x);

    case CONST:
    case SYMBOL_REF:
      /* ??? Short term workaround for PR 28490.  We must make the code here
	 match the code in ia64_expand_move and move_operand, even though they
	 are both technically wrong.  */
      if (tls_symbolic_operand_type (x) == 0)
	{
	  HOST_WIDE_INT addend = 0;
	  rtx op = x;

	  if (GET_CODE (op) == CONST
	      && GET_CODE (XEXP (op, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
	    {
	      addend = INTVAL (XEXP (XEXP (op, 0), 1));
	      op = XEXP (XEXP (op, 0), 0);
	    }

	  if (any_offset_symbol_operand (op, mode)
	      || function_operand (op, mode))
	    return true;
	  if (aligned_offset_symbol_operand (op, mode))
	    return (addend & 0x3fff) == 0;
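	  /* I.e. the addend must be a multiple of 0x4000 (0x8000 is
	     accepted, 0x2001 is not), matching the 14-bit low part that
	     ia64_expand_move and ia64_expand_load_address peel off with
	     ((addend & 0x3fff) ^ 0x2000) - 0x2000.  */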
	  return false;
	}
      return false;

    case CONST_VECTOR:
      if (mode == V2SFmode)
	return satisfies_constraint_Y (x);

      return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	      && GET_MODE_SIZE (mode) <= 8);

    default:
      return false;
    }
}
/* Don't allow TLS addresses to get spilled to memory.  */

static bool
ia64_cannot_force_const_mem (machine_mode mode, rtx x)
{
  if (mode == RFmode)
    return true;
  return tls_symbolic_operand_type (x) != 0;
}
/* Expand a symbolic constant load.  */

bool
ia64_expand_load_address (rtx dest, rtx src)
{
  gcc_assert (GET_CODE (dest) == REG);

  /* ILP32 mode still loads 64-bits of data from the GOT.  This avoids
     having to pointer-extend the value afterward.  Other forms of address
     computation below are also more natural to compute as 64-bit quantities.
     If we've been given an SImode destination register, change it.  */
  if (GET_MODE (dest) != Pmode)
    dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
			       byte_lowpart_offset (Pmode, GET_MODE (dest)));

  if (TARGET_NO_PIC)
    return false;
  if (small_addr_symbolic_operand (src, VOIDmode))
    return false;

  if (TARGET_AUTO_PIC)
    emit_insn (gen_load_gprel64 (dest, src));
  else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
    emit_insn (gen_load_fptr (dest, src));
  else if (sdata_symbolic_operand (src, VOIDmode))
    emit_insn (gen_load_gprel (dest, src));
  else if (local_symbolic_operand64 (src, VOIDmode))
    {
      /* We want to use @gprel rather than @ltoff relocations for local
	 symbols:
	  - @gprel does not require dynamic linker
	  - and does not use .sdata section
	 https://gcc.gnu.org/bugzilla/60465 */
      emit_insn (gen_load_gprel64 (dest, src));
    }
  else
    {
      HOST_WIDE_INT addend = 0;
      rtx tmp;

      /* We did split constant offsets in ia64_expand_move, and we did try
	 to keep them split in move_operand, but we also allowed reload to
	 rematerialize arbitrary constants rather than spill the value to
	 the stack and reload it.  So we have to be prepared here to split
	 them apart again.  */
      if (GET_CODE (src) == CONST)
	{
	  HOST_WIDE_INT hi, lo;

	  hi = INTVAL (XEXP (XEXP (src, 0), 1));
	  lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
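	  /* LO is the low 14 bits of HI, sign-extended: LO lies in
	     [-0x2000, 0x1fff] and HI - LO is a multiple of 0x4000.
	     E.g. HI = 0x12345 gives LO = 0x345 - 0x2000 = -0x1cbb and
	     HI - LO = 0x14000.  */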
	  hi = hi - lo;

	  if (lo != 0)
	    {
	      addend = lo;
	      src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
	    }
	}

      tmp = gen_rtx_HIGH (Pmode, src);
      tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
      emit_insn (gen_rtx_SET (dest, tmp));

      tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
      emit_insn (gen_rtx_SET (dest, tmp));

      if (addend)
	{
	  tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
	  emit_insn (gen_rtx_SET (dest, tmp));
	}
    }

  return true;
}
static GTY(()) rtx gen_tls_tga;
static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static GTY(()) rtx thread_pointer_rtx;
static rtx
gen_thread_pointer (void)
{
  if (!thread_pointer_rtx)
    thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
  return thread_pointer_rtx;
}
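/* In the IA-64 software conventions r13 is the dedicated thread pointer,
   which is why the register number is hard-coded above.  */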
static rtx
ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
			 rtx orig_op1, HOST_WIDE_INT addend)
{
  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp;
  rtx_insn *insns;
  rtx orig_op0 = op0;
  HOST_WIDE_INT addend_lo, addend_hi;

  switch (tls_kind)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_dtpmod (tga_op1, op1));

      tga_op2 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_dtprel (tga_op2, op1));

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode,
					 tga_op1, Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      if (GET_MODE (op0) != Pmode)
	op0 = tga_ret;
      emit_libcall_block (insns, op0, tga_ret, op1);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic
	 If the call to __tls_get_addr is used only by a single symbol,
	 then we should (somehow) move the dtprel to the second arg
	 to avoid the extra add.  */
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_dtpmod (tga_op1, op1));

      tga_op2 = const0_rtx;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode,
					 tga_op1, Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_LD_BASE);
      tmp = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, tmp, tga_ret, tga_eqv);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_dtprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, tmp, op0));
	}
      else
	emit_insn (gen_add_dtprel (op0, op1, tmp));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
      addend_hi = addend - addend_lo;

      op1 = plus_constant (Pmode, op1, addend_hi);
      addend = addend_lo;

      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_load_tprel (tmp, op1));

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_tprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
	}
      else
	emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
      break;

    default:
      gcc_unreachable ();
    }

  if (addend)
    op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
			       orig_op0, 1, OPTAB_DIRECT);
  if (orig_op0 == op0)
    return NULL_RTX;
  if (GET_MODE (orig_op0) == Pmode)
    return op0;
  return gen_lowpart (GET_MODE (orig_op0), op0);
}
rtx
ia64_expand_move (rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);

  if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
    op1 = force_reg (mode, op1);

  if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
    {
      HOST_WIDE_INT addend = 0;
      enum tls_model tls_kind;
      rtx sym = op1;

      if (GET_CODE (op1) == CONST
	  && GET_CODE (XEXP (op1, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
	{
	  addend = INTVAL (XEXP (XEXP (op1, 0), 1));
	  sym = XEXP (XEXP (op1, 0), 0);
	}

      tls_kind = tls_symbolic_operand_type (sym);
      if (tls_kind)
	return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);

      if (any_offset_symbol_operand (sym, mode))
	addend = 0;
      else if (aligned_offset_symbol_operand (sym, mode))
	{
	  HOST_WIDE_INT addend_lo, addend_hi;

	  addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
	  addend_hi = addend - addend_lo;

	  if (addend_lo != 0)
	    {
	      op1 = plus_constant (mode, sym, addend_hi);
	      addend = addend_lo;
	    }
	  else
	    addend = 0;
	}
      else
	addend = 0;

      if (reload_completed)
	{
	  /* We really should have taken care of this offset earlier.  */
	  gcc_assert (addend == 0);
	  if (ia64_expand_load_address (op0, op1))
	    return NULL_RTX;
	}

      if (addend)
	{
	  rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);

	  emit_insn (gen_rtx_SET (subtarget, op1));

	  op1 = expand_simple_binop (mode, PLUS, subtarget,
				     GEN_INT (addend), op0, 1, OPTAB_DIRECT);
	  if (op0 == op1)
	    return NULL_RTX;
	}
    }

  return op1;
}
1393 ia64_emit_cond_move (rtx op0
, rtx op1
, rtx cond
)
1395 rtx_insn
*insn
, *first
= get_last_insn ();
1397 emit_move_insn (op0
, op1
);
1399 for (insn
= get_last_insn (); insn
!= first
; insn
= PREV_INSN (insn
))
1401 PATTERN (insn
) = gen_rtx_COND_EXEC (VOIDmode
, copy_rtx (cond
),
/* Split a post-reload TImode or TFmode reference into two DImode
   components.  This is made extra difficult by the fact that we do
   not get any scratch registers to work with, because reload cannot
   be prevented from giving us a scratch that overlaps the register
   pair involved.  So instead, when addressing memory, we tweak the
   pointer register up and back down with POST_INCs.  Or up and not
   back down when we can get away with it.

   REVERSED is true when the loads must be done in reversed order
   (high word first) for correctness.  DEAD is true when the pointer
   dies with the second insn we generate and therefore the second
   address must not carry a postmodify.

   May return an insn which is to be emitted after the moves.  */
static rtx
ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
{
  rtx fixup = 0;

  switch (GET_CODE (in))
    {
    case REG:
      out[reversed] = gen_rtx_REG (DImode, REGNO (in));
      out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      /* Cannot occur reversed.  */
      gcc_assert (!reversed);

      if (GET_MODE (in) != TFmode)
	split_double (in, &out[0], &out[1]);
      else
	/* split_double does not understand how to split a TFmode
	   quantity into a pair of DImode constants.  */
	{
	  unsigned HOST_WIDE_INT p[2];
	  long l[4];  /* TFmode is 128 bits */

	  real_to_target (l, CONST_DOUBLE_REAL_VALUE (in), TFmode);

	  if (FLOAT_WORDS_BIG_ENDIAN)
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
	      p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
	    }
	  else
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
	      p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
	    }
	  out[0] = GEN_INT (p[0]);
	  out[1] = GEN_INT (p[1]);
	}
      break;

    case MEM:
      {
	rtx base = XEXP (in, 0);
	rtx offset;

	switch (GET_CODE (base))
	  {
	  case REG:
	    if (!reversed)
	      {
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
		out[1] = adjust_automodify_address
		  (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
	      }
	    else
	      {
		/* Reversal requires a pre-increment, which can only
		   be done as a separate insn.  */
		emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
		out[1] = adjust_address (in, DImode, 0);
	      }
	    break;

	  case POST_INC:
	    gcc_assert (!reversed && !dead);

	    /* Just do the increment in two steps.  */
	    out[0] = adjust_automodify_address (in, DImode, 0, 0);
	    out[1] = adjust_automodify_address (in, DImode, 0, 8);
	    break;

	  case POST_DEC:
	    gcc_assert (!reversed && !dead);

	    /* Add 8, subtract 24.  */
	    base = XEXP (base, 0);
	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
	    out[1] = adjust_automodify_address
	      (in, DImode,
	       gen_rtx_POST_MODIFY (Pmode, base,
				    plus_constant (Pmode, base, -24)),
	       8);
	    break;

	  case POST_MODIFY:
	    gcc_assert (!reversed && !dead);

	    /* Extract and adjust the modification.  This case is
	       trickier than the others, because we might have an
	       index register, or we might have a combined offset that
	       doesn't fit a signed 9-bit displacement field.  We can
	       assume the incoming expression is already legitimate.  */
	    offset = XEXP (base, 1);
	    base = XEXP (base, 0);

	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);

	    if (GET_CODE (XEXP (offset, 1)) == REG)
	      {
		/* Can't adjust the postmodify to match.  Emit the
		   original, then a separate addition insn.  */
		out[1] = adjust_automodify_address (in, DImode, 0, 8);
		fixup = gen_adddi3 (base, base, GEN_INT (-8));
	      }
	    else
	      {
		gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
		if (INTVAL (XEXP (offset, 1)) < -256 + 8)
		  {
		    /* Again the postmodify cannot be made to match,
		       but in this case it's more efficient to get rid
		       of the postmodify entirely and fix up with an
		       offset address.  */
		    out[1] = adjust_automodify_address (in, DImode, base, 8);
		    fixup = gen_adddi3
		      (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
		  }
		else
		  {
		    /* Combined offset still fits in the displacement field.
		       (We cannot overflow it at the high end.)  */
		    out[1] = adjust_automodify_address
		      (in, DImode, gen_rtx_POST_MODIFY
		       (Pmode, base, gen_rtx_PLUS
			(Pmode, base,
			 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
		       8);
		  }
	      }
	    break;

	  default:
	    gcc_unreachable ();
	  }
	break;
      }

    default:
      gcc_unreachable ();
    }

  return fixup;
}
/* Split a TImode or TFmode move instruction after reload.
   This is used by *movtf_internal and *movti_internal.  */
void
ia64_split_tmode_move (rtx operands[])
{
  rtx in[2], out[2], insn;
  rtx fixup[2];
  bool dead = false;
  bool reversed = false;

  /* It is possible for reload to decide to overwrite a pointer with
     the value it points to.  In that case we have to do the loads in
     the appropriate order so that the pointer is not destroyed too
     early.  Also we must not generate a postmodify for that second
     load, or rws_access_regno will die.  And we must not generate a
     postmodify for the second load if the destination register
     overlaps with the base register.  */
  if (GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      rtx base = XEXP (operands[1], 0);
      while (GET_CODE (base) != REG)
	base = XEXP (base, 0);

      if (REGNO (base) == REGNO (operands[0]))
	reversed = true;

      if (refers_to_regno_p (REGNO (operands[0]),
			     REGNO (operands[0])+2,
			     base, 0))
	dead = true;
    }
  /* Another reason to do the moves in reversed order is if the first
     element of the target register pair is also the second element of
     the source register pair.  */
  if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    reversed = true;

  fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
  fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);

#define MAYBE_ADD_REG_INC_NOTE(INSN, EXP)				\
  if (GET_CODE (EXP) == MEM						\
      && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_INC			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_DEC))			\
    add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))

  insn = emit_insn (gen_rtx_SET (out[0], in[0]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[0]);

  insn = emit_insn (gen_rtx_SET (out[1], in[1]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[1]);

  if (fixup[0])
    emit_insn (fixup[0]);
  if (fixup[1])
    emit_insn (fixup[1]);

#undef MAYBE_ADD_REG_INC_NOTE
}
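/* The REG_INC notes attached above record the autoincrement side effects
   of the two half-moves; without them, passes that run after the split
   would not see that the base register is modified.  */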
/* ??? Fixing GR->FR XFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from TARGET_SECONDARY_MEMORY_NEEDED or the second
   from SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:XF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */

static rtx
spill_xfmode_rfmode_operand (rtx in, int force, machine_mode mode)
{
  if (GET_CODE (in) == SUBREG
      && GET_MODE (SUBREG_REG (in)) == TImode
      && GET_CODE (SUBREG_REG (in)) == REG)
    {
      rtx memt = assign_stack_temp (TImode, 16);
      emit_move_insn (memt, SUBREG_REG (in));
      return adjust_address (memt, mode, 0);
    }
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (mode, 16);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
/* Expand the movxf or movrf pattern (MODE says which) with the given
   OPERANDS, returning true if the pattern should then invoke
   DONE.  */

bool
ia64_expand_movxf_movrf (machine_mode mode, rtx operands[])
{
  rtx op0 = operands[0];

  if (GET_CODE (op0) == SUBREG)
    op0 = SUBREG_REG (op0);

  /* We must support XFmode loads into general registers for stdarg/vararg,
     unprototyped calls, and a rare case where a long double is passed as
     an argument after a float HFA fills the FP registers.  We split them into
     DImode loads for convenience.  We also need to support XFmode stores
     for the last case.  This case does not happen for stdarg/vararg routines,
     because we do a block store to memory of unnamed arguments.  */

  if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
    {
      rtx out[2];

      /* We're hoping to transform everything that deals with XFmode
	 quantities and GR registers early in the compiler.  */
      gcc_assert (can_create_pseudo_p ());

      /* Struct to register can just use TImode instead.  */
      if ((GET_CODE (operands[1]) == SUBREG
	   && GET_MODE (SUBREG_REG (operands[1])) == TImode)
	  || (GET_CODE (operands[1]) == REG
	      && GR_REGNO_P (REGNO (operands[1]))))
	{
	  rtx op1 = operands[1];

	  if (GET_CODE (op1) == SUBREG)
	    op1 = SUBREG_REG (op1);
	  else
	    op1 = gen_rtx_REG (TImode, REGNO (op1));

	  emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
	  return true;
	}

      if (GET_CODE (operands[1]) == CONST_DOUBLE)
	{
	  /* Don't word-swap when reading in the constant.  */
	  emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
			  operand_subword (operands[1], WORDS_BIG_ENDIAN,
					   0, mode));
	  emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
			  operand_subword (operands[1], !WORDS_BIG_ENDIAN,
					   0, mode));
	  return true;
	}

      /* If the quantity is in a register not known to be GR, spill it.  */
      if (register_operand (operands[1], mode))
	operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);

      gcc_assert (GET_CODE (operands[1]) == MEM);

      /* Don't word-swap when reading in the value.  */
      out[0] = gen_rtx_REG (DImode, REGNO (op0));
      out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);

      emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
      emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
      return true;
    }

  if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
    {
      /* We're hoping to transform everything that deals with XFmode
	 quantities and GR registers early in the compiler.  */
      gcc_assert (can_create_pseudo_p ());

      /* Op0 can't be a GR_REG here, as that case is handled above.
	 If op0 is a register, then we spill op1, so that we now have a
	 MEM operand.  This requires creating an XFmode subreg of a TImode reg
	 to force the spill.  */
      if (register_operand (operands[0], mode))
	{
	  rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
	  op1 = gen_rtx_SUBREG (mode, op1, 0);
	  operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
	}
      else
	{
	  rtx in[2];

	  gcc_assert (GET_CODE (operands[0]) == MEM);

	  /* Don't word-swap when writing out the value.  */
	  in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
	  in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);

	  emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
	  emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
	  return true;
	}
    }

  if (!reload_in_progress && !reload_completed)
    {
      operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);

      if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
	{
	  rtx memt, memx, in = operands[1];
	  if (CONSTANT_P (in))
	    in = validize_mem (force_const_mem (mode, in));
	  if (GET_CODE (in) == MEM)
	    memt = adjust_address (in, TImode, 0);
	  else
	    {
	      memt = assign_stack_temp (TImode, 16);
	      memx = adjust_address (memt, mode, 0);
	      emit_move_insn (memx, in);
	    }
	  emit_move_insn (op0, memt);
	  return true;
	}

      if (!ia64_move_ok (operands[0], operands[1]))
	operands[1] = force_reg (mode, operands[1]);
    }

  return false;
}
/* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
   with the expression that holds the compare result (in VOIDmode).  */

static GTY(()) rtx cmptf_libfunc;

void
ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
{
  enum rtx_code code = GET_CODE (*expr);
  rtx cmp;

  /* If we have a BImode input, then we already have a compare result, and
     do not need to emit another comparison.  */
  if (GET_MODE (*op0) == BImode)
    {
      gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
      cmp = *op0;
    }
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
  else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
    {
      enum qfcmp_magic {
	QCMP_INV = 1,	/* Raise FP_INVALID on NaNs as a side effect.  */
	QCMP_UNORD = 2,
	QCMP_EQ = 4,
	QCMP_LT = 8,
	QCMP_GT = 16
      };
      int magic;
      enum rtx_code ncode;
      rtx ret;

      gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
      switch (code)
	{
	  /* 1 = equal, 0 = not equal.  Equality operators do
	     not raise FP_INVALID when given a NaN operand.  */
	case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
	case NE:        magic = QCMP_EQ;                  ncode = EQ; break;
	  /* isunordered() from C99.  */
	case UNORDERED: magic = QCMP_UNORD;               ncode = NE; break;
	case ORDERED:   magic = QCMP_UNORD;               ncode = EQ; break;
	  /* Relational operators raise FP_INVALID when given
	     a NaN operand.  */
	case LT:        magic = QCMP_LT        |QCMP_INV; ncode = NE; break;
	case LE:        magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	case GT:        magic = QCMP_GT        |QCMP_INV; ncode = NE; break;
	case GE:        magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	  /* Unordered relational operators do not raise FP_INVALID
	     when given a NaN operand.  */
	case UNLT:      magic = QCMP_LT        |QCMP_UNORD; ncode = NE; break;
	case UNLE:      magic = QCMP_LT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
	case UNGT:      magic = QCMP_GT        |QCMP_UNORD; ncode = NE; break;
	case UNGE:      magic = QCMP_GT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
	  /* Not supported.  */
	case UNEQ:
	case LTGT:
	default: gcc_unreachable ();
	}

      start_sequence ();

      ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode,
				     *op0, TFmode, *op1, TFmode,
				     GEN_INT (magic), DImode);
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (ncode, BImode,
						   ret, const0_rtx)));

      rtx_insn *insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, cmp, cmp,
			  gen_rtx_fmt_ee (code, BImode, *op0, *op1));
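      /* Recording the original comparison as the equivalent value of the
	 libcall block lets later passes CSE or delete the whole call when
	 the result turns out to be unused or recomputable.  */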
      code = NE;
    }
  else
    {
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
      code = NE;
    }

  *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
  *op0 = cmp;
  *op1 = const0_rtx;
}
/* Generate an integral vector comparison.  Return true if the condition has
   been reversed, and so the sense of the comparison should be inverted.  */

static bool
ia64_expand_vecint_compare (enum rtx_code code, machine_mode mode,
			    rtx dest, rtx op0, rtx op1)
{
  bool negate = false;
  rtx x;

  /* Canonicalize the comparison to EQ, GT, GTU.  */
  switch (code)
    {
    case EQ:
    case GT:
    case GTU:
      break;

    case NE:
    case LE:
    case LEU:
      code = reverse_condition (code);
      negate = true;
      break;

    case GE:
    case GEU:
      code = reverse_condition (code);
      negate = true;
      /* FALLTHRU */

    case LT:
    case LTU:
      code = swap_condition (code);
      x = op0, op0 = op1, op1 = x;
      break;

    default:
      gcc_unreachable ();
    }

  /* Unsigned parallel compare is not supported by the hardware.  Play some
     tricks to turn this into a signed comparison against 0.  */
  if (code == GTU)
    {
      switch (mode)
	{
	case E_V2SImode:
	  {
	    rtx t1, t2, mask;

	    /* Subtract (-(INT MAX) - 1) from both operands to make
	       them signed.  */
	    mask = gen_int_mode (0x80000000, SImode);
	    mask = gen_const_vec_duplicate (V2SImode, mask);
	    mask = force_reg (mode, mask);
	    t1 = gen_reg_rtx (mode);
	    emit_insn (gen_subv2si3 (t1, op0, mask));
	    t2 = gen_reg_rtx (mode);
	    emit_insn (gen_subv2si3 (t2, op1, mask));
	    op0 = t1;
	    op1 = t2;
	    code = GT;
	  }
	  break;

	case E_V8QImode:
	case E_V4HImode:
	  /* Perform a parallel unsigned saturating subtraction.  */
	  x = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (x, gen_rtx_US_MINUS (mode, op0, op1)));

	  code = EQ;
	  op0 = x;
	  op1 = CONST0_RTX (mode);
1973 x
= gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
1974 emit_insn (gen_rtx_SET (dest
, x
));
/* Emit an integral vector conditional move.  */

void
ia64_expand_vecint_cmov (rtx operands[])
{
  machine_mode mode = GET_MODE (operands[0]);
  enum rtx_code code = GET_CODE (operands[3]);
  bool negate;
  rtx cmp, x, ot, of;

  cmp = gen_reg_rtx (mode);
  negate = ia64_expand_vecint_compare (code, mode, cmp,
				       operands[4], operands[5]);

  ot = operands[1+negate];
  of = operands[2-negate];

  if (ot == CONST0_RTX (mode))
    {
      if (of == CONST0_RTX (mode))
	{
	  emit_move_insn (operands[0], ot);
	  return;
	}

      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, of);
      emit_insn (gen_rtx_SET (operands[0], x));
    }
  else if (of == CONST0_RTX (mode))
    {
      x = gen_rtx_AND (mode, cmp, ot);
      emit_insn (gen_rtx_SET (operands[0], x));
    }
  else
    {
      rtx t, f;

      t = gen_reg_rtx (mode);
      x = gen_rtx_AND (mode, cmp, operands[1+negate]);
      emit_insn (gen_rtx_SET (t, x));

      f = gen_reg_rtx (mode);
      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, operands[2-negate]);
      emit_insn (gen_rtx_SET (f, x));

      x = gen_rtx_IOR (mode, t, f);
      emit_insn (gen_rtx_SET (operands[0], x));
    }
}
/* Emit an integral vector min or max operation.  Return true if all done.  */

bool
ia64_expand_vecint_minmax (enum rtx_code code, machine_mode mode,
			   rtx operands[3])
{
  rtx xops[6];

  /* These four combinations are supported directly.  */
  if (mode == V8QImode && (code == UMIN || code == UMAX))
    return false;
  if (mode == V4HImode && (code == SMIN || code == SMAX))
    return false;

  /* This combination can be implemented with only saturating subtraction.  */
  if (mode == V4HImode && code == UMAX)
    {
      rtx x, tmp = gen_reg_rtx (mode);

      x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
      emit_insn (gen_rtx_SET (tmp, x));

      emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
      return true;
    }

  /* Everything else implemented via vector comparisons.  */
  xops[0] = operands[0];
  xops[4] = xops[1] = operands[1];
  xops[5] = xops[2] = operands[2];

  switch (code)
    {
    case UMIN:
      code = LTU;
      break;
    case UMAX:
      code = GTU;
      break;
    case SMIN:
      code = LT;
      break;
    case SMAX:
      code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);

  ia64_expand_vecint_cmov (xops);
  return true;
}
/* The vectors LO and HI each contain N halves of a double-wide vector.
   Reassemble either the first N/2 or the second N/2 elements.  */

static void
ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
{
  machine_mode vmode = GET_MODE (lo);
  unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
  struct expand_vec_perm_d d;
  bool ok;

  d.target = gen_lowpart (vmode, out);
  d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
  d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
  d.vmode = vmode;
  d.one_operand_p = false;
  d.testing_p = false;

  high = (highp ? nelt / 2 : 0);
  for (i = 0; i < nelt / 2; ++i)
    {
      d.perm[i * 2] = i + high;
      d.perm[i * 2 + 1] = i + high + nelt;
    }

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}
/* Return a vector of the sign-extension of VEC.  */

static rtx
ia64_unpack_sign (rtx vec, bool unsignedp)
{
  machine_mode mode = GET_MODE (vec);
  rtx zero = CONST0_RTX (mode);

  if (unsignedp)
    return zero;
  else
    {
      rtx sign = gen_reg_rtx (mode);
      bool neg;

      neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
      gcc_assert (!neg);

      return sign;
    }
}
/* Emit an integral vector unpack operation.  */

void
ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
{
  rtx sign = ia64_unpack_sign (operands[1], unsignedp);
  ia64_unpack_assemble (operands[0], operands[1], sign, highp);
}
/* Emit an integral vector widening sum operation.  */

void
ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
{
  machine_mode wmode;
  rtx l, h, t, sign;

  sign = ia64_unpack_sign (operands[1], unsignedp);

  wmode = GET_MODE (operands[0]);
  l = gen_reg_rtx (wmode);
  h = gen_reg_rtx (wmode);

  ia64_unpack_assemble (l, operands[1], sign, false);
  ia64_unpack_assemble (h, operands[1], sign, true);

  t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
  t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
  if (t != operands[0])
    emit_move_insn (operands[0], t);
}
/* Emit the appropriate sequence for a call.  */

void
ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
		  int sibcall_p)
{
  rtx insn, b0;

  addr = XEXP (addr, 0);
  addr = convert_memory_address (DImode, addr);
  b0 = gen_rtx_REG (DImode, R_BR (0));

  /* ??? Should do this for functions known to bind local too.  */
  if (TARGET_NO_PIC || TARGET_AUTO_PIC)
    {
      if (sibcall_p)
	insn = gen_sibcall_nogp (addr);
      else if (! retval)
	insn = gen_call_nogp (addr, b0);
      else
	insn = gen_call_value_nogp (retval, addr, b0);
      insn = emit_call_insn (insn);
    }
  else
    {
      if (sibcall_p)
	insn = gen_sibcall_gp (addr);
      else if (! retval)
	insn = gen_call_gp (addr, b0);
      else
	insn = gen_call_value_gp (retval, addr, b0);
      insn = emit_call_insn (insn);

      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
    }

  if (sibcall_p)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);

  if (TARGET_ABI_OPEN_VMS)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
	     gen_rtx_REG (DImode, GR_REG (25)));
}
static void
reg_emitted (enum ia64_frame_regs r)
{
  if (emitted_frame_related_regs[r] == 0)
    emitted_frame_related_regs[r] = current_frame_info.r[r];
  else
    gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
}

static int
get_reg (enum ia64_frame_regs r)
{
  reg_emitted (r);
  return current_frame_info.r[r];
}

static bool
is_emitted (int regno)
{
  unsigned int r;

  for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
    if (emitted_frame_related_regs[r] == regno)
      return true;
  return false;
}
static void
ia64_reload_gp (void)
{
  rtx tmp;

  if (current_frame_info.r[reg_save_gp])
    {
      tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
    }
  else
    {
      HOST_WIDE_INT offset;
      rtx offset_r;

      offset = (current_frame_info.spill_cfa_off
	        + current_frame_info.spill_size);
      if (frame_pointer_needed)
	{
	  tmp = hard_frame_pointer_rtx;
	  offset = -offset;
	}
      else
	{
	  tmp = stack_pointer_rtx;
	  offset = current_frame_info.total_size - offset;
	}

      offset_r = GEN_INT (offset);
      if (satisfies_constraint_I (offset_r))
	emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
      else
	{
	  emit_move_insn (pic_offset_table_rtx, offset_r);
	  emit_insn (gen_adddi3 (pic_offset_table_rtx,
				 pic_offset_table_rtx, tmp));
	}

      tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
    }

  emit_move_insn (pic_offset_table_rtx, tmp);
}
void
ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
		 rtx scratch_b, int noreturn_p, int sibcall_p)
{
  rtx insn;
  bool is_desc = false;

  /* If we find we're calling through a register, then we're actually
     calling through a descriptor, so load up the values.  */
  if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
    {
      rtx tmp;
      bool addr_dead_p;

      /* ??? We are currently constrained to *not* use peep2, because
	 we can legitimately change the global lifetime of the GP
	 (in the form of killing where previously live).  This is
	 because a call through a descriptor doesn't use the previous
	 value of the GP, while a direct call does, and we do not
	 commit to either form until the split here.

	 That said, this means that we lack precise life info for
	 whether ADDR is dead after this call.  This is not terribly
	 important, since we can fix things up essentially for free
	 with the POST_DEC below, but it's nice to not use it when we
	 can immediately tell it's not necessary.  */
      addr_dead_p = ((noreturn_p || sibcall_p
		      || TEST_HARD_REG_BIT (regs_invalidated_by_call,
					    REGNO (addr)))
		     && !FUNCTION_ARG_REGNO_P (REGNO (addr)));

      /* Load the code address into scratch_b.  */
      tmp = gen_rtx_POST_INC (Pmode, addr);
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (scratch_r, tmp);
      emit_move_insn (scratch_b, scratch_r);

      /* Load the GP address.  If ADDR is not dead here, then we must
	 revert the change made above via the POST_INCREMENT.  */
      if (!addr_dead_p)
	tmp = gen_rtx_POST_DEC (Pmode, addr);
      else
	tmp = addr;
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (pic_offset_table_rtx, tmp);

      is_desc = true;
      addr = scratch_b;
    }

  if (sibcall_p)
    insn = gen_sibcall_nogp (addr);
  else if (retval)
    insn = gen_call_value_nogp (retval, addr, retaddr);
  else
    insn = gen_call_nogp (addr, retaddr);
  emit_call_insn (insn);

  if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
    ia64_reload_gp ();
}
/* Expand an atomic operation.  We want to perform MEM <CODE>= VAL atomically.

   This differs from the generic code in that we know about the zero-extending
   properties of cmpxchg, and the zero-extending requirements of ar.ccv.  We
   also know that ld.acq+cmpxchg.rel equals a full barrier.

   The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	new_reg = cmp_reg op val;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

   Note that we only do the plain load from memory once.  Subsequent
   iterations use the value loaded by the compare-and-swap pattern.  */

void
ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
		       rtx old_dst, rtx new_dst, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
  enum insn_code icode;

  /* Special case for using fetchadd.  */
  if ((mode == SImode || mode == DImode)
      && (code == PLUS || code == MINUS)
      && fetchadd_operand (val, mode))
    {
      if (code == MINUS)
	val = GEN_INT (-INTVAL (val));

      if (!old_dst)
	old_dst = gen_reg_rtx (mode);

      switch (model)
	{
	case MEMMODEL_ACQ_REL:
	case MEMMODEL_SEQ_CST:
	case MEMMODEL_SYNC_SEQ_CST:
	  emit_insn (gen_memory_barrier ());
	  /* FALLTHRU */
	case MEMMODEL_RELAXED:
	case MEMMODEL_ACQUIRE:
	case MEMMODEL_SYNC_ACQUIRE:
	case MEMMODEL_CONSUME:
	  if (mode == SImode)
	    icode = CODE_FOR_fetchadd_acq_si;
	  else
	    icode = CODE_FOR_fetchadd_acq_di;
	  break;
	case MEMMODEL_RELEASE:
	case MEMMODEL_SYNC_RELEASE:
	  if (mode == SImode)
	    icode = CODE_FOR_fetchadd_rel_si;
	  else
	    icode = CODE_FOR_fetchadd_rel_di;
	  break;

	default:
	  gcc_unreachable ();
	}

      emit_insn (GEN_FCN (icode) (old_dst, mem, val));

      if (new_dst)
	{
	  new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
					 true, OPTAB_WIDEN);
	  if (new_reg != new_dst)
	    emit_move_insn (new_dst, new_reg);
	}
      return;
    }

  /* Because of the volatile mem read, we get an ld.acq, which is the
     front half of the full barrier.  The end half is the cmpxchg.rel.
     For relaxed and release memory models, we don't need this.  But we
     also don't bother trying to prevent it either.  */
  gcc_assert (is_mm_relaxed (model) || is_mm_release (model)
	      || MEM_VOLATILE_P (mem));

  old_reg = gen_reg_rtx (DImode);
  cmp_reg = gen_reg_rtx (DImode);
  label = gen_label_rtx ();

  if (mode != DImode)
    {
      val = simplify_gen_subreg (DImode, val, mode, 0);
      emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
    }
  else
    emit_move_insn (cmp_reg, mem);

  emit_label (label);

  ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
  emit_move_insn (old_reg, cmp_reg);
  emit_move_insn (ar_ccv, cmp_reg);

  if (old_dst)
    emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));

  new_reg = cmp_reg;
  if (code == NOT)
    {
      new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
				     true, OPTAB_DIRECT);
      new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
    }
  else
    new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
				   true, OPTAB_DIRECT);

  if (mode != DImode)
    new_reg = gen_lowpart (mode, new_reg);
  if (new_dst)
    emit_move_insn (new_dst, new_reg);

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_SYNC_ACQUIRE:
    case MEMMODEL_CONSUME:
      switch (mode)
	{
	case E_QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
	case E_HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
	case E_SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
	case E_DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
	default:
	  gcc_unreachable ();
	}
      break;

    case MEMMODEL_RELEASE:
    case MEMMODEL_SYNC_RELEASE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
    case MEMMODEL_SYNC_SEQ_CST:
      switch (mode)
	{
	case E_QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
	case E_HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
	case E_SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
	case E_DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));

  emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
}
/* Begin the assembly file.  */

static void
ia64_file_start (void)
{
  default_file_start ();
  emit_safe_across_calls ();
}

void
emit_safe_across_calls (void)
{
  unsigned int rs, re;
  int out_state;

  rs = 1;
  out_state = 0;
  while (1)
    {
      while (rs < 64 && call_used_regs[PR_REG (rs)])
	rs++;
      if (rs >= 64)
	break;
      for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
	continue;
      if (out_state == 0)
	{
	  fputs ("\t.pred.safe_across_calls ", asm_out_file);
	  out_state = 1;
	}
      else
	fputc (',', asm_out_file);
      if (re == rs + 1)
	fprintf (asm_out_file, "p%u", rs);
      else
	fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
      rs = re + 1;
    }
  if (out_state)
    fputc ('\n', asm_out_file);
}
/* Globalize a declaration.  */

static void
ia64_globalize_decl_name (FILE * stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
  tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
  if (version_attr)
    {
      tree v = TREE_VALUE (TREE_VALUE (version_attr));
      const char *p = TREE_STRING_POINTER (v);
      fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
    }
  targetm.asm_out.globalize_label (stream, name);
  if (TREE_CODE (decl) == FUNCTION_DECL)
    ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
}
/* Helper function for ia64_compute_frame_size: find an appropriate general
   register to spill some special register to.  SPECIAL_SPILL_MASK contains
   bits in GR0 to GR31 that have already been allocated by this routine.
   TRY_LOCALS is true if we should attempt to locate a local regnum.  */

static int
find_gr_spill (enum ia64_frame_regs r, int try_locals)
{
  int regno;

  if (emitted_frame_related_regs[r] != 0)
    {
      regno = emitted_frame_related_regs[r];
      if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
	  && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
	current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
      else if (crtl->is_leaf
	       && regno >= GR_REG (1) && regno <= GR_REG (31))
	current_frame_info.gr_used_mask |= 1 << regno;

      return regno;
    }

  /* If this is a leaf function, first try an otherwise unused
     call-clobbered register.  */
  if (crtl->is_leaf)
    {
      for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
	if (! df_regs_ever_live_p (regno)
	    && call_used_regs[regno]
	    && ! fixed_regs[regno]
	    && ! global_regs[regno]
	    && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
	    && ! is_emitted (regno))
	  {
	    current_frame_info.gr_used_mask |= 1 << regno;
	    return regno;
	  }
    }

  if (try_locals)
    {
      regno = current_frame_info.n_local_regs;
      /* If there is a frame pointer, then we can't use loc79, because
	 that is HARD_FRAME_POINTER_REGNUM.  In particular, see the
	 reg_name switching code in ia64_expand_prologue.  */
      while (regno < (80 - frame_pointer_needed))
	if (! is_emitted (LOC_REG (regno++)))
	  {
	    current_frame_info.n_local_regs = regno;
	    return LOC_REG (regno - 1);
	  }
    }

  /* Failed to find a general register to spill to.  Must use stack.  */
  return 0;
}
/* In order to make for nice schedules, we try to allocate every temporary
   to a different register.  We must of course stay away from call-saved,
   fixed, and global registers.  We must also stay away from registers
   allocated in current_frame_info.gr_used_mask, since those include regs
   used all through the prologue.

   Any register allocated here must be used immediately.  The idea is to
   aid scheduling, not to solve data flow problems.  */

static int last_scratch_gr_reg;

static int
next_scratch_gr_reg (void)
{
  int i, regno;

  for (i = 0; i < 32; ++i)
    {
      regno = (last_scratch_gr_reg + i + 1) & 31;
      if (call_used_regs[regno]
	  && ! fixed_regs[regno]
	  && ! global_regs[regno]
	  && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	{
	  last_scratch_gr_reg = regno;
	  return regno;
	}
    }

  /* There must be _something_ available.  */
  gcc_unreachable ();
}
/* Helper function for ia64_compute_frame_size, called through
   diddle_return_value.  Mark REG in current_frame_info.gr_used_mask.  */

static void
mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno = REGNO (reg);
  if (regno < 32)
    {
      unsigned int i, n = REG_NREGS (reg);
      for (i = 0; i < n; ++i)
	current_frame_info.gr_used_mask |= 1 << (regno + i);
    }
}
/* Returns the number of bytes offset between the frame pointer and the stack
   pointer for the current function.  SIZE is the number of bytes of space
   needed for local variables.  */

static void
ia64_compute_frame_size (HOST_WIDE_INT size)
{
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT spill_size = 0;
  HOST_WIDE_INT extra_spill_size = 0;
  HOST_WIDE_INT pretend_args_size;
  HARD_REG_SET mask;
  int n_spilled = 0;
  int spilled_gr_p = 0;
  int spilled_fr_p = 0;
  unsigned int regno;
  int min_regno;
  int max_regno;
  int i;

  if (current_frame_info.initialized)
    return;

  memset (&current_frame_info, 0, sizeof current_frame_info);
  CLEAR_HARD_REG_SET (mask);

  /* Don't allocate scratches to the return register.  */
  diddle_return_value (mark_reg_gr_used_mask, NULL);

  /* Don't allocate scratches to the EH scratch registers.  */
  if (cfun->machine->ia64_eh_epilogue_sp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
  if (cfun->machine->ia64_eh_epilogue_bsp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);

  /* Static stack checking uses r2 and r3.  */
  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
      || flag_stack_clash_protection)
    current_frame_info.gr_used_mask |= 0xc;

  /* Find the size of the register stack frame.  We have only 80 local
     registers, because we reserve 8 for the inputs and 8 for the
     outputs.  */

  /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
     since we'll be adjusting that down later.  */
  regno = LOC_REG (78) + ! frame_pointer_needed;
  for (; regno >= LOC_REG (0); regno--)
    if (df_regs_ever_live_p (regno) && !is_emitted (regno))
      break;
  current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;

  /* For functions marked with the syscall_linkage attribute, we must mark
     all eight input registers as in use, so that locals aren't visible to
     the caller.  */
  if (cfun->machine->n_varargs > 0
      || lookup_attribute ("syscall_linkage",
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    current_frame_info.n_input_regs = 8;
  else
    {
      for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
	if (df_regs_ever_live_p (regno))
	  break;
      current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
    }

  for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
    if (df_regs_ever_live_p (regno))
      break;
  i = regno - OUT_REG (0) + 1;

#ifndef PROFILE_HOOK
  /* When -p profiling, we need one output register for the mcount argument.
     Likewise for -a profiling for the bb_init_func argument.  For -ax
     profiling, we need two output registers for the two bb_init_trace_func
     arguments.  */
  if (crtl->profile)
    i = MAX (i, 1);
#endif
  current_frame_info.n_output_regs = i;

  /* ??? No rotating register support yet.  */
  current_frame_info.n_rotate_regs = 0;

  /* Discover which registers need spilling, and how much room that
     will take.  Begin with floating point and general registers,
     which will always wind up on the stack.  */

  for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
    if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 16;
	n_spilled += 1;
	spilled_fr_p = 1;
      }

  for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
    if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
	spilled_gr_p = 1;
      }

  for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
    if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
      }

  /* Now come all special registers that might get saved in other
     general registers.  */

  if (frame_pointer_needed)
    {
      current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
      /* If we did not get a register, then we take LOC79.  This is guaranteed
	 to be free, even if regs_ever_live is already set, because this is
	 HARD_FRAME_POINTER_REGNUM.  This requires incrementing n_local_regs,
	 as we don't count loc79 above.  */
      if (current_frame_info.r[reg_fp] == 0)
	{
	  current_frame_info.r[reg_fp] = LOC_REG (79);
	  current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
	}
    }

  if (! crtl->is_leaf)
    {
      /* Emit a save of BR0 if we call other functions.  Do this even
	 if this function doesn't return, as EH depends on this to be
	 able to unwind the stack.  */
      SET_HARD_REG_BIT (mask, BR_REG (0));

      current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
      if (current_frame_info.r[reg_save_b0] == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for ar.pfs.  */
      SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
      current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
      if (current_frame_info.r[reg_save_ar_pfs] == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for gp.  Note that if we're calling setjmp, the stacked
	 registers are clobbered, so we fall back to the stack.  */
      current_frame_info.r[reg_save_gp]
	= (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
      if (current_frame_info.r[reg_save_gp] == 0)
	{
	  SET_HARD_REG_BIT (mask, GR_REG (1));
	  spill_size += 8;
	  n_spilled += 1;
	}
    }
  else
    {
      if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
	{
	  SET_HARD_REG_BIT (mask, BR_REG (0));
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      if (df_regs_ever_live_p (AR_PFS_REGNUM))
	{
	  SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
	  current_frame_info.r[reg_save_ar_pfs]
	    = find_gr_spill (reg_save_ar_pfs, 1);
	  if (current_frame_info.r[reg_save_ar_pfs] == 0)
	    {
	      extra_spill_size += 8;
	      n_spilled += 1;
	    }
	}
    }

  /* Unwind descriptor hackery: things are most efficient if we allocate
     consecutive GR save registers for RP, PFS, FP in that order.  However,
     it is absolutely critical that FP get the only hard register that's
     guaranteed to be free, so we allocated it first.  If all three did
     happen to be allocated hard regs, and are consecutive, rearrange them
     into the preferred order now.

     If we have already emitted code for any of those registers,
     then it's already too late to change.  */
  min_regno = MIN (current_frame_info.r[reg_fp],
		   MIN (current_frame_info.r[reg_save_b0],
			current_frame_info.r[reg_save_ar_pfs]));
  max_regno = MAX (current_frame_info.r[reg_fp],
		   MAX (current_frame_info.r[reg_save_b0],
			current_frame_info.r[reg_save_ar_pfs]));
  if (min_regno > 0
      && min_regno + 2 == max_regno
      && (current_frame_info.r[reg_fp] == min_regno + 1
	  || current_frame_info.r[reg_save_b0] == min_regno + 1
	  || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
      && (emitted_frame_related_regs[reg_save_b0] == 0
	  || emitted_frame_related_regs[reg_save_b0] == min_regno)
      && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
	  || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
      && (emitted_frame_related_regs[reg_fp] == 0
	  || emitted_frame_related_regs[reg_fp] == min_regno + 2))
    {
      current_frame_info.r[reg_save_b0] = min_regno;
      current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
      current_frame_info.r[reg_fp] = min_regno + 2;
    }

  /* See if we need to store the predicate register block.  */
  for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
    if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
      break;
  if (regno <= PR_REG (63))
    {
      SET_HARD_REG_BIT (mask, PR_REG (0));
      current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
      if (current_frame_info.r[reg_save_pr] == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* ??? Mark them all as used so that register renaming and such
	 are free to use them.  */
      for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
	df_set_regs_ever_live (regno, true);
    }

  /* If we're forced to use st8.spill, we're forced to save and restore
     ar.unat as well.  The check for existing liveness allows inline asm
     to touch ar.unat.  */
  if (spilled_gr_p || cfun->machine->n_varargs
      || df_regs_ever_live_p (AR_UNAT_REGNUM))
    {
      df_set_regs_ever_live (AR_UNAT_REGNUM, true);
      SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
      current_frame_info.r[reg_save_ar_unat]
	= find_gr_spill (reg_save_ar_unat, spill_size == 0);
      if (current_frame_info.r[reg_save_ar_unat] == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  if (df_regs_ever_live_p (AR_LC_REGNUM))
    {
      SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
      current_frame_info.r[reg_save_ar_lc]
	= find_gr_spill (reg_save_ar_lc, spill_size == 0);
      if (current_frame_info.r[reg_save_ar_lc] == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  /* If we have an odd number of words of pretend arguments written to
     the stack, then the FR save area will be unaligned.  We round the
     size of this area up to keep things 16 byte aligned.  */
  if (spilled_fr_p)
    pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
  else
    pretend_args_size = crtl->args.pretend_args_size;

  total_size = (spill_size + extra_spill_size + size + pretend_args_size
		+ crtl->outgoing_args_size);
  total_size = IA64_STACK_ALIGN (total_size);

  /* We always use the 16-byte scratch area provided by the caller, but
     if we are a leaf function, there's no one to which we need to provide
     a scratch area.  However, if the function allocates dynamic stack space,
     the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
     so we need to cope.  */
  if (crtl->is_leaf && !cfun->calls_alloca)
    total_size = MAX (0, total_size - 16);

  current_frame_info.total_size = total_size;
  current_frame_info.spill_cfa_off = pretend_args_size - 16;
  current_frame_info.spill_size = spill_size;
  current_frame_info.extra_spill_size = extra_spill_size;
  COPY_HARD_REG_SET (current_frame_info.mask, mask);
  current_frame_info.n_spilled = n_spilled;
  current_frame_info.initialized = reload_completed;
}
/* Worker function for TARGET_CAN_ELIMINATE.  */

static bool
ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == BR_REG (0) ? crtl->is_leaf : true);
}
/* Compute the initial difference between the specified pair of registers.  */

HOST_WIDE_INT
ia64_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  ia64_compute_frame_size (get_frame_size ());
  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      switch (to)
	{
	case HARD_FRAME_POINTER_REGNUM:
	  offset = -current_frame_info.total_size;
	  if (!crtl->is_leaf || cfun->calls_alloca)
	    offset += 16 + crtl->outgoing_args_size;
	  break;

	case STACK_POINTER_REGNUM:
	  offset = 0;
	  if (!crtl->is_leaf || cfun->calls_alloca)
	    offset += 16 + crtl->outgoing_args_size;
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case ARG_POINTER_REGNUM:
      /* Arguments start above the 16 byte save area, unless stdarg
	 in which case we store through the 16 byte save area.  */
      switch (to)
	{
	case HARD_FRAME_POINTER_REGNUM:
	  offset = 16 - crtl->args.pretend_args_size;
	  break;

	case STACK_POINTER_REGNUM:
	  offset = (current_frame_info.total_size
		    + 16 - crtl->args.pretend_args_size);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  return offset;
}
/* If there are more than a trivial number of register spills, we use
   two interleaved iterators so that we can get two memory references
   per insn group.

   In order to simplify things in the prologue and epilogue expanders,
   we use helper functions to fix up the memory references after the
   fact with the appropriate offsets to a POST_MODIFY memory mode.
   The following data structure tracks the state of the two iterators
   while insns are being emitted.  */

struct spill_fill_data
{
  rtx_insn *init_after;		/* point at which to emit initializations */
  rtx init_reg[2];		/* initial base register */
  rtx iter_reg[2];		/* the iterator registers */
  rtx *prev_addr[2];		/* address of last memory use */
  rtx_insn *prev_insn[2];	/* the insn corresponding to prev_addr */
  HOST_WIDE_INT prev_off[2];	/* last offset */
  int n_iter;			/* number of iterators in use */
  int next_iter;		/* next iterator to use */
  unsigned int save_gr_used_mask;
};

static struct spill_fill_data spill_fill_data;
static void
setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
{
  int i;

  spill_fill_data.init_after = get_last_insn ();
  spill_fill_data.init_reg[0] = init_reg;
  spill_fill_data.init_reg[1] = init_reg;
  spill_fill_data.prev_addr[0] = NULL;
  spill_fill_data.prev_addr[1] = NULL;
  spill_fill_data.prev_insn[0] = NULL;
  spill_fill_data.prev_insn[1] = NULL;
  spill_fill_data.prev_off[0] = cfa_off;
  spill_fill_data.prev_off[1] = cfa_off;
  spill_fill_data.next_iter = 0;
  spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;

  spill_fill_data.n_iter = 1 + (n_spills > 2);
  for (i = 0; i < spill_fill_data.n_iter; ++i)
    {
      int regno = next_scratch_gr_reg ();
      spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
      current_frame_info.gr_used_mask |= 1 << regno;
    }
}
static void
finish_spill_pointers (void)
{
  current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
}
static rtx
spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
  rtx disp_rtx = GEN_INT (disp);
  rtx mem;

  if (spill_fill_data.prev_addr[iter])
    {
      if (satisfies_constraint_N (disp_rtx))
	{
	  *spill_fill_data.prev_addr[iter]
	    = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
				   gen_rtx_PLUS (DImode,
						 spill_fill_data.iter_reg[iter],
						 disp_rtx));
	  add_reg_note (spill_fill_data.prev_insn[iter],
			REG_INC, spill_fill_data.iter_reg[iter]);
	}
      else
	{
	  /* ??? Could use register post_modify for loads.  */
	  if (!satisfies_constraint_I (disp_rtx))
	    {
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }
	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.iter_reg[iter], disp_rtx));
	}
    }
  /* Micro-optimization: if we've created a frame pointer, it's at
     CFA 0, which may allow the real iterator to be initialized lower,
     slightly increasing parallelism.  Also, if there are few saves
     it may eliminate the iterator entirely.  */
  else if (disp == 0
	   && spill_fill_data.init_reg[iter] == stack_pointer_rtx
	   && frame_pointer_needed)
    {
      mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
      set_mem_alias_set (mem, get_varargs_alias_set ());
      return mem;
    }
  else
    {
      rtx seq;
      rtx_insn *insn;

      if (disp == 0)
	seq = gen_movdi (spill_fill_data.iter_reg[iter],
			 spill_fill_data.init_reg[iter]);
      else
	{
	  start_sequence ();

	  if (!satisfies_constraint_I (disp_rtx))
	    {
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }

	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.init_reg[iter],
				 disp_rtx));

	  seq = get_insns ();
	  end_sequence ();
	}

      /* Careful for being the first insn in a sequence.  */
      if (spill_fill_data.init_after)
	insn = emit_insn_after (seq, spill_fill_data.init_after);
      else
	{
	  rtx_insn *first = get_insns ();
	  if (first)
	    insn = emit_insn_before (seq, first);
	  else
	    insn = emit_insn (seq);
	}
      spill_fill_data.init_after = insn;
    }

  mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);

  /* ??? Not all of the spills are for varargs, but some of them are.
     The rest of the spills belong in an alias set of their own.  But
     it doesn't actually hurt to include them here.  */
  set_mem_alias_set (mem, get_varargs_alias_set ());

  spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
  spill_fill_data.prev_off[iter] = cfa_off;

  if (++iter >= spill_fill_data.n_iter)
    iter = 0;
  spill_fill_data.next_iter = iter;

  return mem;
}
static void
do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
	  rtx frame_reg)
{
  int iter = spill_fill_data.next_iter;
  rtx mem;
  rtx_insn *insn;

  mem = spill_restore_mem (reg, cfa_off);
  insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;

  if (frame_reg)
    {
      rtx base;
      HOST_WIDE_INT off;

      RTX_FRAME_RELATED_P (insn) = 1;

      /* Don't even pretend that the unwind code can intuit its way
	 through a pair of interleaved post_modify iterators.  Just
	 provide the correct answer.  */

      if (frame_pointer_needed)
	{
	  base = hard_frame_pointer_rtx;
	  off = - cfa_off;
	}
      else
	{
	  base = stack_pointer_rtx;
	  off = current_frame_info.total_size - cfa_off;
	}

      add_reg_note (insn, REG_CFA_OFFSET,
		    gen_rtx_SET (gen_rtx_MEM (GET_MODE (reg),
					      plus_constant (Pmode,
							     base, off)),
				 frame_reg));
    }
}

static void
do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  rtx_insn *insn;

  insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
				GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;
}
/* Wrapper functions that discard the CONST_INT spill offset.  These
   exist so that we can give gr_spill/gr_fill the offset they need and
   use a consistent function interface.  */

static rtx
gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_movdi (dest, src);
}

static rtx
gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_fr_spill (dest, src);
}

static rtx
gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_fr_restore (dest, src);
}
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

/* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2.  */
#define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
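/* Worked example: a frame using all 96 stacked registers needs
   BACKING_STORE_SIZE (96) = (96 + 96/63 + 1) * 8 = (96 + 1 + 1) * 8
   = 784 bytes; the extra words account for the RSE's NaT collection
   spills, one per 63 registers, plus one more for rounding.  */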
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  BS_SIZE
   is the size of the backing store.  ??? This clobbers r2 and r3.  */

static void
ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
			     int bs_size)
{
  rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
  rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
  rtx p6 = gen_rtx_REG (BImode, PR_REG (6));

  /* On the IA-64 there is a second stack in memory, namely the Backing Store
     of the Register Stack Engine.  We also need to probe it after checking
     that the 2 stacks don't overlap.  */
  emit_insn (gen_bsp_value (r3));
  emit_move_insn (r2, GEN_INT (-(first + size)));

  /* Compare current value of BSP and SP registers.  */
  emit_insn (gen_rtx_SET (p6, gen_rtx_fmt_ee (LTU, BImode,
					      r3, stack_pointer_rtx)));

  /* Compute the address of the probe for the Backing Store (which grows
     towards higher addresses).  We probe only at the first offset of
     the next page because some OS (eg Linux/ia64) only extend the
     backing store when this specific address is hit (but generate a SEGV
     on other address).  Page size is the worst case (4KB).  The reserve
     size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
     Also compute the address of the last probe for the memory stack
     (which grows towards lower addresses).  */
  emit_insn (gen_rtx_SET (r3, plus_constant (Pmode, r3, 4095)));
  emit_insn (gen_rtx_SET (r2, gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));

  /* Compare them and raise SEGV if the former has topped the latter.  */
  emit_insn (gen_rtx_COND_EXEC (VOIDmode,
				gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
				gen_rtx_SET (p6, gen_rtx_fmt_ee (GEU, BImode,
								 r3, r2))));
  emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
						const0_rtx),
			  const0_rtx));
  emit_insn (gen_rtx_COND_EXEC (VOIDmode,
				gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
				gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
						 GEN_INT (11))));

  /* Probe the Backing Store if necessary.  */
  if (bs_size > 0)
    emit_stack_probe (r3);

  /* Probe the memory stack if necessary.  */
  if (size == 0)
    ;

  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  else if (size <= PROBE_INTERVAL)
    emit_stack_probe (r2);

  /* The run-time loop is made up of 9 insns in the generic case while this
     compile-time loop is made up of 5+2*(n-2) insns for n # of intervals.  */
  else if (size <= 4 * PROBE_INTERVAL)
    {
      HOST_WIDE_INT i;

      emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
      emit_insn (gen_rtx_SET (r2,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
      emit_stack_probe (r2);

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
	 it exceeds SIZE.  If only two probes are needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	{
	  emit_insn (gen_rtx_SET (r2,
				  plus_constant (Pmode, r2, -PROBE_INTERVAL)));
	  emit_stack_probe (r2);
	}

      emit_insn (gen_rtx_SET (r2,
			      plus_constant (Pmode, r2,
					     (i - PROBE_INTERVAL) - size)));
      emit_stack_probe (r2);
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;

      emit_move_insn (r2, GEN_INT (-first));

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (r2,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > (1 << 21))
	{
	  emit_move_insn (r3, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (r3, gen_rtx_PLUS (Pmode, r2, r3)));
	}
      else
	emit_insn (gen_rtx_SET (r3, gen_rtx_PLUS (Pmode, r2,
						  GEN_INT (-rounded_size))));

      /* Step 3: the loop

	 do
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }
	 while (TEST_ADDR != LAST_ADDR)

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      emit_insn (gen_probe_stack_range (r2, r2, r3));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      /* TEMP = SIZE - ROUNDED_SIZE.  */
      if (size != rounded_size)
	{
	  emit_insn (gen_rtx_SET (r2, plus_constant (Pmode, r2,
						     rounded_size - size)));
	  emit_stack_probe (r2);
	}
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
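/* Worked example, assuming the default PROBE_INTERVAL of 4096: for
   FIRST = 16384 and SIZE = 12288 the middle case above probes at
   SP - 20480, SP - 24576 and SP - 28672, i.e. at FIRST + N * 4096 for
   N = 1, 2 and then at FIRST + SIZE.  */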
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[3];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[0] = reg1;
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addl %0 = %1, %0", xops);
  fputs ("\t;;\n", asm_out_file);

  /* Probe at TEST_ADDR.  */
  output_asm_insn ("probe.w.fault %0, 0", xops);

  /* Test if TEST_ADDR == LAST_ADDR.  */
  xops[1] = reg2;
  xops[2] = gen_rtx_REG (BImode, PR_REG (6));
  output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);

  /* Branch.  */
  fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [PR_REG (7)]);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
/* Called after register allocation to add any instructions needed for the
   prologue.  Using a prologue insn is favored compared to putting all of the
   instructions in output_function_prologue(), since it allows the scheduler
   to intermix instructions with the saves of the caller saved registers.  In
   some cases, it might be necessary to emit a barrier instruction as the last
   insn to prevent such scheduling.

   Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
   so that the debug info generation code can handle them properly.

   The register save area is laid out like so:
   cfa+16
	[ varargs spill area ]
	[ fr register spill area ]
	[ br register spill area ]
	[ ar register spill area ]
	[ pr register spill area ]
	[ gr register spill area ] */

/* ??? Get inefficient code when the frame size is larger than can fit in an
   adds instruction.  */

void
ia64_expand_prologue (void)
{
  rtx_insn *insn;
  rtx ar_pfs_save_reg, ar_unat_save_reg;
  int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
  rtx reg, alt_reg;

  ia64_compute_frame_size (get_frame_size ());
  last_scratch_gr_reg = 15;

  if (flag_stack_usage_info)
    current_function_static_stack_size = current_frame_info.total_size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
      || flag_stack_clash_protection)
    {
      HOST_WIDE_INT size = current_frame_info.total_size;
      int bs_size = BACKING_STORE_SIZE (current_frame_info.n_input_regs
					+ current_frame_info.n_local_regs);

      if (crtl->is_leaf && !cfun->calls_alloca)
	{
	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
	    ia64_emit_probe_stack_range (get_stack_check_protect (),
					 size - get_stack_check_protect (),
					 bs_size);
	  else if (size + bs_size > get_stack_check_protect ())
	    ia64_emit_probe_stack_range (get_stack_check_protect (),
					 0, bs_size);
	}
      else if (size + bs_size > 0)
	ia64_emit_probe_stack_range (get_stack_check_protect (), size, bs_size);
    }

  if (dump_file)
    {
      fprintf (dump_file, "ia64 frame related registers "
	       "recorded in current_frame_info.r[]:\n");
#define PRINTREG(a) if (current_frame_info.r[a]) \
	fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
      PRINTREG(reg_fp);
      PRINTREG(reg_save_b0);
      PRINTREG(reg_save_pr);
      PRINTREG(reg_save_ar_pfs);
      PRINTREG(reg_save_ar_unat);
      PRINTREG(reg_save_ar_lc);
      PRINTREG(reg_save_gp);
#undef PRINTREG
    }

  /* If there is no epilogue, then we don't need some prologue insns.
     We need to avoid emitting the dead prologue insns, because flow
     will complain about them.  */
  if (optimize)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
	if ((e->flags & EDGE_FAKE) == 0
	    && (e->flags & EDGE_FALLTHRU) != 0)
	  break;
      epilogue_p = (e != NULL);
    }
  else
    epilogue_p = 1;

  /* Set the local, input, and output register names.  We need to do this
     for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
     half.  If we use in/loc/out register names, then we get assembler errors
     in crtn.S because there is no alloc insn or regstk directive in there.  */
  if (! TARGET_REG_NAMES)
    {
      int inputs = current_frame_info.n_input_regs;
      int locals = current_frame_info.n_local_regs;
      int outputs = current_frame_info.n_output_regs;

      for (i = 0; i < inputs; i++)
	reg_names[IN_REG (i)] = ia64_reg_numbers[i];
      for (i = 0; i < locals; i++)
	reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
      for (i = 0; i < outputs; i++)
	reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
    }

  /* Set the frame pointer register name.  The regnum is logically loc79,
     but of course we'll not have allocated that many locals.  Rather than
     worrying about renumbering the existing rtxs, we adjust the name.  */
  /* ??? This code means that we can never use one local register when
     there is a frame pointer.  loc79 gets wasted in this case, as it is
     renamed to a register that will never be used.  See also the try_locals
     code in find_gr_spill.  */
  if (current_frame_info.r[reg_fp])
    {
      const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
      reg_names[HARD_FRAME_POINTER_REGNUM]
	= reg_names[current_frame_info.r[reg_fp]];
      reg_names[current_frame_info.r[reg_fp]] = tmp;
    }

  /* We don't need an alloc instruction if we've used no outputs or locals.  */
  if (current_frame_info.n_local_regs == 0
      && current_frame_info.n_output_regs == 0
      && current_frame_info.n_input_regs <= crtl->args.info.int_regs
      && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
    {
      /* If there is no alloc, but there are input registers used, then we
	 need a .regstk directive.  */
      current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
      ar_pfs_save_reg = NULL_RTX;
    }
  else
    {
      current_frame_info.need_regstk = 0;

      if (current_frame_info.r[reg_save_ar_pfs])
	{
	  regno = current_frame_info.r[reg_save_ar_pfs];
	  reg_emitted (reg_save_ar_pfs);
	}
      else
	regno = next_scratch_gr_reg ();
      ar_pfs_save_reg = gen_rtx_REG (DImode, regno);

      insn = emit_insn (gen_alloc (ar_pfs_save_reg,
				   GEN_INT (current_frame_info.n_input_regs),
				   GEN_INT (current_frame_info.n_local_regs),
				   GEN_INT (current_frame_info.n_output_regs),
				   GEN_INT (current_frame_info.n_rotate_regs)));
      if (current_frame_info.r[reg_save_ar_pfs])
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_REGISTER,
			gen_rtx_SET (ar_pfs_save_reg,
				     gen_rtx_REG (DImode, AR_PFS_REGNUM)));
	}
    }

  /* Set up frame pointer, stack pointer, and spill iterators.  */

  n_varargs = cfun->machine->n_varargs;
  setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
			stack_pointer_rtx, 0);

  if (frame_pointer_needed)
    {
      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Force the unwind info to recognize this as defining a new CFA,
	 rather than some temp register setup.  */
      add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
    }

  if (current_frame_info.total_size != 0)
    {
      rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
      rtx offset;

      if (satisfies_constraint_I (frame_size_rtx))
	offset = frame_size_rtx;
      else
	{
	  regno = next_scratch_gr_reg ();
	  offset = gen_rtx_REG (DImode, regno);
	  emit_move_insn (offset, frame_size_rtx);
	}

      insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
				    stack_pointer_rtx, offset));

      if (! frame_pointer_needed)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_ADJUST_CFA,
			gen_rtx_SET (stack_pointer_rtx,
				     gen_rtx_PLUS (DImode,
						   stack_pointer_rtx,
						   frame_size_rtx)));
	}

      /* ??? At this point we must generate a magic insn that appears to
	 modify the stack pointer, the frame pointer, and all spill
	 iterators.  This would allow the most scheduling freedom.  For
	 now, just hard stop.  */
      emit_insn (gen_blockage ());
    }

  /* Must copy out ar.unat before doing any integer spills.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      if (current_frame_info.r[reg_save_ar_unat])
	{
	  ar_unat_save_reg
	    = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
	  reg_emitted (reg_save_ar_unat);
	}
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
	  current_frame_info.gr_used_mask |= 1 << alt_regno;
	}

      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      insn = emit_move_insn (ar_unat_save_reg, reg);
      if (current_frame_info.r[reg_save_ar_unat])
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
	}

      /* Even if we're not going to generate an epilogue, we still
	 need to save the register so that EH works.  */
      if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
	emit_insn (gen_prologue_use (ar_unat_save_reg));
    }
  else
    ar_unat_save_reg = NULL_RTX;

  /* Spill all varargs registers.  Do this before spilling any GR registers,
     since we want the UNAT bits for the GR registers to override the UNAT
     bits from varargs, which we don't care about.  */

  cfa_off = -16;
  for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
    {
      reg = gen_rtx_REG (DImode, regno);
      do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
    }

  /* Locate the bottom of the register save area.  */
  cfa_off = (current_frame_info.spill_cfa_off
	     + current_frame_info.spill_size
	     + current_frame_info.extra_spill_size);

  /* Save the predicate register block either in a register or in memory.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
    {
      reg = gen_rtx_REG (DImode, PR_REG (0));
      if (current_frame_info.r[reg_save_pr] != 0)
	{
	  alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
	  reg_emitted (reg_save_pr);
	  insn = emit_move_insn (alt_reg, reg);

	  /* ??? Denote pr spill/fill by a DImode move that modifies all
	     64 hard registers.  */
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);

	  /* Even if we're not going to generate an epilogue, we still
	     need to save the register so that EH works.  */
	  if (! epilogue_p)
	    emit_insn (gen_prologue_use (alt_reg));
	}
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  insn = emit_move_insn (alt_reg, reg);
	  do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	  cfa_off -= 8;
	}
    }

  /* Handle AR regs in numerical order.  All of them get special handling.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
      && current_frame_info.r[reg_save_ar_unat] == 0)
    {
      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
      cfa_off -= 8;
    }

  /* The alloc insn already copied ar.pfs into a general register.  The
     only thing we have to do now is copy that register to a stack slot
     if we'd not allocated a local register for the job.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
      && current_frame_info.r[reg_save_ar_pfs] == 0)
    {
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
      cfa_off -= 8;
    }

  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
    {
      reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
      if (current_frame_info.r[reg_save_ar_lc] != 0)
	{
	  alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
	  reg_emitted (reg_save_ar_lc);
	  insn = emit_move_insn (alt_reg, reg);
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);

	  /* Even if we're not going to generate an epilogue, we still
	     need to save the register so that EH works.  */
	  if (! epilogue_p)
	    emit_insn (gen_prologue_use (alt_reg));
	}
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  emit_move_insn (alt_reg, reg);
	  do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	  cfa_off -= 8;
	}
    }

  /* Save the return pointer.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      reg = gen_rtx_REG (DImode, BR_REG (0));
      if (current_frame_info.r[reg_save_b0] != 0)
	{
	  alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
	  reg_emitted (reg_save_b0);
	  insn = emit_move_insn (alt_reg, reg);
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (alt_reg, pc_rtx));

	  /* Even if we're not going to generate an epilogue, we still
	     need to save the register so that EH works.  */
	  if (! epilogue_p)
	    emit_insn (gen_prologue_use (alt_reg));
	}
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  emit_move_insn (alt_reg, reg);
	  do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	  cfa_off -= 8;
	}
    }

  if (current_frame_info.r[reg_save_gp])
    {
      reg_emitted (reg_save_gp);
      insn = emit_move_insn (gen_rtx_REG (DImode,
					  current_frame_info.r[reg_save_gp]),
			     pic_offset_table_rtx);
    }

  /* We should now be at the base of the gr/br/fr spill area.  */
  gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
			  + current_frame_info.spill_size));

  /* Spill all general registers.  */
  for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	reg = gen_rtx_REG (DImode, regno);
	do_spill (gen_gr_spill, reg, cfa_off, reg);
	cfa_off -= 8;
      }

  /* Spill the rest of the BR registers.  */
  for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	alt_regno = next_scratch_gr_reg ();
	alt_reg = gen_rtx_REG (DImode, alt_regno);
	reg = gen_rtx_REG (DImode, regno);
	emit_move_insn (alt_reg, reg);
	do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	cfa_off -= 8;
      }

  /* Align the frame and spill all FR registers.  */
  for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	gcc_assert (!(cfa_off & 15));
	reg = gen_rtx_REG (XFmode, regno);
	do_spill (gen_fr_spill_x, reg, cfa_off, reg);
	cfa_off -= 16;
      }

  gcc_assert (cfa_off == current_frame_info.spill_cfa_off);

  finish_spill_pointers ();
}
/* Output the textual info surrounding the prologue.  */

void
ia64_start_function (FILE *file, const char *fnname,
		     tree decl ATTRIBUTE_UNUSED)
{
#if TARGET_ABI_OPEN_VMS
  vms_start_function (fnname);
#endif

  fputs ("\t.proc ", file);
  assemble_name (file, fnname);
  fputc ('\n', file);
  ASM_OUTPUT_LABEL (file, fnname);
}
/* Called after register allocation to add any instructions needed for the
   epilogue.  Using an epilogue insn is favored compared to putting all of the
   instructions in output_function_prologue(), since it allows the scheduler
   to intermix instructions with the saves of the caller saved registers.  In
   some cases, it might be necessary to emit a barrier instruction as the last
   insn to prevent such scheduling.  */

void
ia64_expand_epilogue (int sibcall_p)
{
  rtx insn, reg, alt_reg, ar_unat_save_reg;
  int regno, alt_regno, cfa_off;

  ia64_compute_frame_size (get_frame_size ());

  /* If there is a frame pointer, then we use it instead of the stack
     pointer, so that the stack pointer does not need to be valid when
     the epilogue starts.  See EXIT_IGNORE_STACK.  */
  if (frame_pointer_needed)
    setup_spill_pointers (current_frame_info.n_spilled,
                          hard_frame_pointer_rtx, 0);
  else
    setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
                          current_frame_info.total_size);

  if (current_frame_info.total_size != 0)
    {
      /* ??? At this point we must generate a magic insn that appears to
         modify the spill iterators and the frame pointer.  This would
         allow the most scheduling freedom.  For now, just hard stop.  */
      emit_insn (gen_blockage ());
    }

  /* Locate the bottom of the register save area.  */
  cfa_off = (current_frame_info.spill_cfa_off
             + current_frame_info.spill_size
             + current_frame_info.extra_spill_size);

  /* Restore the predicate registers.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
    {
      if (current_frame_info.r[reg_save_pr] != 0)
        {
          alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
          reg_emitted (reg_save_pr);
        }
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          do_restore (gen_movdi_x, alt_reg, cfa_off);
          cfa_off -= 8;
        }
      reg = gen_rtx_REG (DImode, PR_REG (0));
      emit_move_insn (reg, alt_reg);
    }

  /* Restore the application registers.  */

  /* Load the saved unat from the stack, but do not restore it until
     after the GRs have been restored.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      if (current_frame_info.r[reg_save_ar_unat] != 0)
        {
          ar_unat_save_reg
            = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
          reg_emitted (reg_save_ar_unat);
        }
      else
        {
          alt_regno = next_scratch_gr_reg ();
          ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
          current_frame_info.gr_used_mask |= 1 << alt_regno;
          do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
          cfa_off -= 8;
        }
    }
  else
    ar_unat_save_reg = NULL_RTX;

  if (current_frame_info.r[reg_save_ar_pfs] != 0)
    {
      reg_emitted (reg_save_ar_pfs);
      alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      emit_move_insn (reg, alt_reg);
    }
  else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
    {
      alt_regno = next_scratch_gr_reg ();
      alt_reg = gen_rtx_REG (DImode, alt_regno);
      do_restore (gen_movdi_x, alt_reg, cfa_off);
      cfa_off -= 8;
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      emit_move_insn (reg, alt_reg);
    }

  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
    {
      if (current_frame_info.r[reg_save_ar_lc] != 0)
        {
          alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
          reg_emitted (reg_save_ar_lc);
        }
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          do_restore (gen_movdi_x, alt_reg, cfa_off);
          cfa_off -= 8;
        }
      reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
      emit_move_insn (reg, alt_reg);
    }

  /* Restore the return pointer.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      if (current_frame_info.r[reg_save_b0] != 0)
        {
          alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
          reg_emitted (reg_save_b0);
        }
      else
        {
          alt_regno = next_scratch_gr_reg ();
          alt_reg = gen_rtx_REG (DImode, alt_regno);
          do_restore (gen_movdi_x, alt_reg, cfa_off);
          cfa_off -= 8;
        }
      reg = gen_rtx_REG (DImode, BR_REG (0));
      emit_move_insn (reg, alt_reg);
    }

  /* We should now be at the base of the gr/br/fr spill area.  */
  gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
                          + current_frame_info.spill_size));

  /* The GP may be stored on the stack in the prologue, but it's
     never restored in the epilogue.  Skip the stack slot.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
    cfa_off -= 8;

  /* Restore all general registers.  */
  for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        reg = gen_rtx_REG (DImode, regno);
        do_restore (gen_gr_restore, reg, cfa_off);
        cfa_off -= 8;
      }

  /* Restore the branch registers.  */
  for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        alt_regno = next_scratch_gr_reg ();
        alt_reg = gen_rtx_REG (DImode, alt_regno);
        do_restore (gen_movdi_x, alt_reg, cfa_off);
        cfa_off -= 8;
        reg = gen_rtx_REG (DImode, regno);
        emit_move_insn (reg, alt_reg);
      }

  /* Restore floating point registers.  */
  for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
        gcc_assert (!(cfa_off & 15));
        reg = gen_rtx_REG (XFmode, regno);
        do_restore (gen_fr_restore_x, reg, cfa_off);
        cfa_off -= 16;
      }

  /* Restore ar.unat for real.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      emit_move_insn (reg, ar_unat_save_reg);
    }

  gcc_assert (cfa_off == current_frame_info.spill_cfa_off);

  finish_spill_pointers ();

  if (current_frame_info.total_size
      || cfun->machine->ia64_eh_epilogue_sp
      || frame_pointer_needed)
    {
      /* ??? At this point we must generate a magic insn that appears to
         modify the spill iterators, the stack pointer, and the frame
         pointer.  This would allow the most scheduling freedom.  For now,
         just hard stop.  */
      emit_insn (gen_blockage ());
    }

  if (cfun->machine->ia64_eh_epilogue_sp)
    emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
  else if (frame_pointer_needed)
    {
      insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
    }
  else if (current_frame_info.total_size)
    {
      rtx offset, frame_size_rtx;

      frame_size_rtx = GEN_INT (current_frame_info.total_size);
      if (satisfies_constraint_I (frame_size_rtx))
        offset = frame_size_rtx;
      else
        {
          regno = next_scratch_gr_reg ();
          offset = gen_rtx_REG (DImode, regno);
          emit_move_insn (offset, frame_size_rtx);
        }

      insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                    offset));

      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_ADJUST_CFA,
                    gen_rtx_SET (stack_pointer_rtx,
                                 gen_rtx_PLUS (DImode,
                                               stack_pointer_rtx,
                                               frame_size_rtx)));
    }

  if (cfun->machine->ia64_eh_epilogue_bsp)
    emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));

  if (! sibcall_p)
    emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
  else
    {
      int fp = GR_REG (2);
      /* We need a throw away register here, r0 and r1 are reserved,
         so r2 is the first available call clobbered register.  If
         there was a frame_pointer register, we may have swapped the
         names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
         sure we're using the string "r2" when emitting the register
         name for the assembler.  */
      if (current_frame_info.r[reg_fp]
          && current_frame_info.r[reg_fp] == GR_REG (2))
        fp = HARD_FRAME_POINTER_REGNUM;

      /* We must emit an alloc to force the input registers to become output
         registers.  Otherwise, if the callee tries to pass its parameters
         through to another call without an intervening alloc, then these
         values get lost.  */
      /* ??? We don't need to preserve all input registers.  We only need to
         preserve those input registers used as arguments to the sibling call.
         It is unclear how to compute that number here.  */
      if (current_frame_info.n_input_regs != 0)
        {
          rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);

          insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
                                       const0_rtx, const0_rtx,
                                       n_inputs, const0_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;

          /* ??? We need to mark the alloc as frame-related so that it gets
             passed into ia64_asm_unwind_emit for ia64-specific unwinding.
             But there's nothing dwarf2 related to be done wrt the register
             windows.  If we do nothing, dwarf2out will abort on the UNSPEC;
             the empty parallel means dwarf2out will not see anything.  */
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
        }
    }
}
/* Return 1 if br.ret can do all the work required to return from a
   function.  */

int
ia64_direct_return (void)
{
  if (reload_completed && ! frame_pointer_needed)
    {
      ia64_compute_frame_size (get_frame_size ());

      return (current_frame_info.total_size == 0
              && current_frame_info.n_spilled == 0
              && current_frame_info.r[reg_save_b0] == 0
              && current_frame_info.r[reg_save_pr] == 0
              && current_frame_info.r[reg_save_ar_pfs] == 0
              && current_frame_info.r[reg_save_ar_unat] == 0
              && current_frame_info.r[reg_save_ar_lc] == 0);
    }
  return 0;
}
/* Return the magic cookie that we use to hold the return address
   during early compilation.  */

rtx
ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return NULL;
  return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
}
/* Split this value after reload, now that we know where the return
   address is saved.  */

void
ia64_split_return_addr_rtx (rtx dest)
{
  rtx src;

  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      if (current_frame_info.r[reg_save_b0] != 0)
        {
          src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
          reg_emitted (reg_save_b0);
        }
      else
        {
          HOST_WIDE_INT off;
          unsigned int regno;
          rtx off_r;

          /* Compute offset from CFA for BR0.  */
          /* ??? Must be kept in sync with ia64_expand_prologue.  */
          off = (current_frame_info.spill_cfa_off
                 + current_frame_info.spill_size);
          for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
            if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
              off -= 8;

          /* Convert CFA offset to a register based offset.  */
          if (frame_pointer_needed)
            src = hard_frame_pointer_rtx;
          else
            {
              src = stack_pointer_rtx;
              off += current_frame_info.total_size;
            }

          /* Load address into scratch register.  */
          off_r = GEN_INT (off);
          if (satisfies_constraint_I (off_r))
            emit_insn (gen_adddi3 (dest, src, off_r));
          else
            {
              emit_move_insn (dest, off_r);
              emit_insn (gen_adddi3 (dest, src, dest));
            }

          src = gen_rtx_MEM (Pmode, dest);
        }
    }
  else
    src = gen_rtx_REG (DImode, BR_REG (0));

  emit_move_insn (dest, src);
}
int
ia64_hard_regno_rename_ok (int from, int to)
{
  /* Don't clobber any of the registers we reserved for the prologue.  */
  unsigned int r;

  for (r = reg_fp; r <= reg_save_ar_lc; r++)
    if (to == current_frame_info.r[r]
        || from == current_frame_info.r[r]
        || to == emitted_frame_related_regs[r]
        || from == emitted_frame_related_regs[r])
      return 0;

  /* Don't use output registers outside the register frame.  */
  if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
    return 0;

  /* Retain even/oddness on predicate register pairs.  */
  if (PR_REGNO_P (from) && PR_REGNO_P (to))
    return (from & 1) == (to & 1);

  return 1;
}
/* Implement TARGET_HARD_REGNO_NREGS.

   ??? We say that BImode PR values require two registers.  This allows us to
   easily store the normal and inverted values.  We use CCImode to indicate
   a single predicate register.  */

static unsigned int
ia64_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
  if (regno == PR_REG (0) && mode == DImode)
    return 64;
  if (PR_REGNO_P (regno) && (mode) == BImode)
    return 2;
  if ((PR_REGNO_P (regno) || GR_REGNO_P (regno)) && mode == CCImode)
    return 1;
  if (FR_REGNO_P (regno) && mode == XFmode)
    return 1;
  if (FR_REGNO_P (regno) && mode == RFmode)
    return 1;
  if (FR_REGNO_P (regno) && mode == XCmode)
    return 2;
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}
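
/* Illustrative examples (added commentary, not original source): under
   the rules above a BImode value in a predicate register takes 2
   registers (normal plus inverted form), XFmode in an FR register takes
   just 1, and TImode in general registers falls through to
   CEIL (16, 8) = 2.  */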
/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
ia64_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (FR_REGNO_P (regno))
    return (GET_MODE_CLASS (mode) != MODE_CC
            && mode != BImode
            && mode != TFmode);

  if (PR_REGNO_P (regno))
    return mode == BImode || GET_MODE_CLASS (mode) == MODE_CC;

  if (GR_REGNO_P (regno))
    return mode != XFmode && mode != XCmode && mode != RFmode;

  if (AR_REGNO_P (regno))
    return mode == DImode;

  if (BR_REGNO_P (regno))
    return mode == DImode;

  return false;
}
/* Implement TARGET_MODES_TIEABLE_P.

   Don't tie integer and FP modes, as that causes us to get integer registers
   allocated for FP instructions.  XFmode is only supported in FP registers,
   so we can't tie it with any other modes.  */

static bool
ia64_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2)
          && ((mode1 == XFmode || mode1 == XCmode || mode1 == RFmode)
              == (mode2 == XFmode || mode2 == XCmode || mode2 == RFmode))
          && (mode1 == BImode) == (mode2 == BImode));
}
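
/* For example (added commentary, not original source): SFmode and
   DFmode tie (same class, neither in the XF/XC/RF group), SImode and
   SFmode do not (different classes), and DFmode and XFmode do not
   (only one of them is in the XF/XC/RF group).  */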
/* Target hook for assembling integer objects.  Handle word-sized
   aligned objects and detect the cases when @fptr is needed.  */

static bool
ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == POINTER_SIZE / BITS_PER_UNIT
      && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_FUNCTION_P (x))
    {
      static const char * const directive[2][2] = {
          /* 64-bit pointer */    /* 32-bit pointer */
        { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("},  /* unaligned */
        { "\tdata8\t@fptr(",    "\tdata4\t@fptr("}      /* aligned */
      };
      fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
      output_addr_const (asm_out_file, x);
      fputs (")\n", asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Emit the function prologue.  */

static void
ia64_output_function_prologue (FILE *file)
{
  int mask, grsave, grsave_prev;

  if (current_frame_info.need_regstk)
    fprintf (file, "\t.regstk %d, %d, %d, %d\n",
             current_frame_info.n_input_regs,
             current_frame_info.n_local_regs,
             current_frame_info.n_output_regs,
             current_frame_info.n_rotate_regs);

  if (ia64_except_unwind_info (&global_options) != UI_TARGET)
    return;

  /* Emit the .prologue directive.  */

  mask = 0;
  grsave = grsave_prev = 0;
  if (current_frame_info.r[reg_save_b0] != 0)
    {
      mask |= 8;
      grsave = grsave_prev = current_frame_info.r[reg_save_b0];
    }
  if (current_frame_info.r[reg_save_ar_pfs] != 0
      && (grsave_prev == 0
          || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
    {
      mask |= 4;
      if (grsave_prev == 0)
        grsave = current_frame_info.r[reg_save_ar_pfs];
      grsave_prev = current_frame_info.r[reg_save_ar_pfs];
    }
  if (current_frame_info.r[reg_fp] != 0
      && (grsave_prev == 0
          || current_frame_info.r[reg_fp] == grsave_prev + 1))
    {
      mask |= 2;
      if (grsave_prev == 0)
        grsave = HARD_FRAME_POINTER_REGNUM;
      grsave_prev = current_frame_info.r[reg_fp];
    }
  if (current_frame_info.r[reg_save_pr] != 0
      && (grsave_prev == 0
          || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
    {
      mask |= 1;
      if (grsave_prev == 0)
        grsave = current_frame_info.r[reg_save_pr];
    }

  if (mask && TARGET_GNU_AS)
    fprintf (file, "\t.prologue %d, %d\n", mask,
             ia64_dbx_register_number (grsave));
  else
    fputs ("\t.prologue\n", file);

  /* Emit a .spill directive, if necessary, to relocate the base of
     the register spill area.  */
  if (current_frame_info.spill_cfa_off != -16)
    fprintf (file, "\t.spill %ld\n",
             (long) (current_frame_info.spill_cfa_off
                     + current_frame_info.spill_size));
}
/* Emit the .body directive at the scheduled end of the prologue.  */

static void
ia64_output_function_end_prologue (FILE *file)
{
  if (ia64_except_unwind_info (&global_options) != UI_TARGET)
    return;

  fputs ("\t.body\n", file);
}
/* Emit the function epilogue.  */

static void
ia64_output_function_epilogue (FILE *)
{
  int i;

  if (current_frame_info.r[reg_fp])
    {
      const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
      reg_names[HARD_FRAME_POINTER_REGNUM]
        = reg_names[current_frame_info.r[reg_fp]];
      reg_names[current_frame_info.r[reg_fp]] = tmp;
      reg_emitted (reg_fp);
    }
  if (! TARGET_REG_NAMES)
    {
      for (i = 0; i < current_frame_info.n_input_regs; i++)
        reg_names[IN_REG (i)] = ia64_input_reg_names[i];
      for (i = 0; i < current_frame_info.n_local_regs; i++)
        reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
      for (i = 0; i < current_frame_info.n_output_regs; i++)
        reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
    }

  current_frame_info.initialized = 0;
}
int
ia64_dbx_register_number (int regno)
{
  /* In ia64_expand_prologue we quite literally renamed the frame pointer
     from its home at loc79 to something inside the register frame.  We
     must perform the same renumbering here for the debug info.  */
  if (current_frame_info.r[reg_fp])
    {
      if (regno == HARD_FRAME_POINTER_REGNUM)
        regno = current_frame_info.r[reg_fp];
      else if (regno == current_frame_info.r[reg_fp])
        regno = HARD_FRAME_POINTER_REGNUM;
    }

  if (IN_REGNO_P (regno))
    return 32 + regno - IN_REG (0);
  else if (LOC_REGNO_P (regno))
    return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
  else if (OUT_REGNO_P (regno))
    return (32 + current_frame_info.n_input_regs
            + current_frame_info.n_local_regs + regno - OUT_REG (0));
  else
    return regno;
}
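
/* Worked example (added commentary, not original source): in a function
   with 2 input and 3 local registers, in1 maps to 32 + 1 = 33, loc0 to
   32 + 2 = 34, and out0 to 32 + 2 + 3 = 37.  */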
/* Implement TARGET_TRAMPOLINE_INIT.

   The trampoline should set the static chain pointer to value placed
   into the trampoline and should branch to the specified routine.
   To make the normal indirect-subroutine calling convention work,
   the trampoline must look like a function descriptor; the first
   word being the target address and the second being the target's
   global pointer.

   We abuse the concept of a global pointer by arranging for it
   to point to the data we need to load.  The complete trampoline
   has the following form:

                +-------------------+ \
        TRAMP:  | __ia64_trampoline | |
                +-------------------+  > fake function descriptor
                | TRAMP+16          | |
                +-------------------+ /
                | target descriptor |
                +-------------------+
                | static link       |
                +-------------------+
*/

static void
ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx addr, addr_reg, tramp, eight = GEN_INT (8);

  /* The Intel assembler requires that the global __ia64_trampoline symbol
     be declared explicitly */
  if (!TARGET_GNU_AS)
    {
      static bool declared_ia64_trampoline = false;

      if (!declared_ia64_trampoline)
        {
          declared_ia64_trampoline = true;
          (*targetm.asm_out.globalize_label) (asm_out_file,
                                              "__ia64_trampoline");
        }
    }

  /* Make sure addresses are Pmode even if we are in ILP32 mode.  */
  addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
  fnaddr = convert_memory_address (Pmode, fnaddr);
  static_chain = convert_memory_address (Pmode, static_chain);

  /* Load up our iterator.  */
  addr_reg = copy_to_reg (addr);
  m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);

  /* The first two words are the fake descriptor:
     __ia64_trampoline, ADDR+16.  */
  tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
  if (TARGET_ABI_OPEN_VMS)
    {
      /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
         in the Macro-32 compiler) and changed the semantics of the LTOFF22
         relocation against function symbols to make it identical to the
         LTOFF_FPTR22 relocation.  Emit the latter directly to stay within
         strict ELF and dereference to get the bare code address.  */
      rtx reg = gen_reg_rtx (Pmode);
      SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
      emit_move_insn (reg, tramp);
      emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
      tramp = reg;
    }
  emit_move_insn (m_tramp, tramp);
  emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
  m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);

  emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
  emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
  m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);

  /* The third word is the target descriptor.  */
  emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
  emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
  m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);

  /* The fourth word is the static chain.  */
  emit_move_insn (m_tramp, static_chain);
}
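
/* Layout recap (added commentary, not original source): word 0
   (TRAMP+0) holds the address of __ia64_trampoline, word 1 (TRAMP+8)
   holds TRAMP+16 posing as the "gp", word 2 (TRAMP+16) holds the real
   target descriptor address, and word 3 (TRAMP+24) holds the static
   chain -- 32 bytes in total.  */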
/* Do any needed setup for a variadic function.  CUM has not been updated
   for the last named argument which has type TYPE and mode MODE.

   We generate the actual spill instructions during prologue generation.  */

static void
ia64_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
                             tree type, int * pretend_size,
                             int second_time ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);

  /* Skip the current argument.  */
  ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);

  if (next_cum.words < MAX_ARGUMENT_SLOTS)
    {
      int n = MAX_ARGUMENT_SLOTS - next_cum.words;
      *pretend_size = n * UNITS_PER_WORD;
      cfun->machine->n_varargs = n;
    }
}
/* Check whether TYPE is a homogeneous floating point aggregate.  If
   it is, return the mode of the floating point type that appears
   in all leafs.  If it is not, return VOIDmode.

   An aggregate is a homogeneous floating point aggregate if all
   fields/elements in it have the same floating point type (e.g.,
   SFmode).  128-bit quad-precision floats are excluded.

   Variable sized aggregates should never arrive here, since we should
   have already decided to pass them by reference.  Top-level zero-sized
   aggregates are excluded because our parallels crash the middle-end.  */

static machine_mode
hfa_element_mode (const_tree type, bool nested)
{
  machine_mode element_mode = VOIDmode;
  machine_mode mode;
  enum tree_code code = TREE_CODE (type);
  int know_element_mode = 0;
  tree t;

  if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
    return VOIDmode;

  switch (code)
    {
    case VOID_TYPE:     case INTEGER_TYPE:      case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:  case POINTER_TYPE:
    case OFFSET_TYPE:   case REFERENCE_TYPE:    case METHOD_TYPE:
    case LANG_TYPE:     case FUNCTION_TYPE:
      return VOIDmode;

      /* Fortran complex types are supposed to be HFAs, so we need to handle
         gcc's COMPLEX_TYPEs as HFAs.  We need to exclude the integral complex
         types though.  */
    case COMPLEX_TYPE:
      if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
          && TYPE_MODE (type) != TCmode)
        return GET_MODE_INNER (TYPE_MODE (type));
      else
        return VOIDmode;

    case REAL_TYPE:
      /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
         mode if this is contained within an aggregate.  */
      if (nested && TYPE_MODE (type) != TFmode)
        return TYPE_MODE (type);
      else
        return VOIDmode;

    case ARRAY_TYPE:
      return hfa_element_mode (TREE_TYPE (type), 1);

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
        {
          if (TREE_CODE (t) != FIELD_DECL)
            continue;

          mode = hfa_element_mode (TREE_TYPE (t), 1);
          if (know_element_mode)
            {
              if (mode != element_mode)
                return VOIDmode;
            }
          else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
            return VOIDmode;
          else
            {
              know_element_mode = 1;
              element_mode = mode;
            }
        }
      return element_mode;

    default:
      /* If we reach here, we probably have some front-end specific type
         that the backend doesn't know about.  This can happen via the
         aggregate_value_p call in init_function_start.  All we can do is
         ignore unknown tree types.  */
      return VOIDmode;
    }

  return VOIDmode;
}
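
/* Illustrative examples (added commentary, not original source): for
   "struct { float x; float y[2]; }" every leaf is SFmode, so SFmode is
   returned; "struct { double d; float f; }" mixes element modes and
   yields VOIDmode; and any field whose type maps to TFmode (128-bit
   quad precision) is rejected by the REAL_TYPE case above.  */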
/* Return the number of words required to hold a quantity of TYPE and MODE
   when passed as an argument.  */

static int
ia64_function_arg_words (const_tree type, machine_mode mode)
{
  int words;

  if (mode == BLKmode)
    words = int_size_in_bytes (type);
  else
    words = GET_MODE_SIZE (mode);

  return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD;  /* round up */
}
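
/* Worked example (added commentary, not original source): a 12-byte
   BLKmode aggregate needs (12 + 8 - 1) / 8 = 2 argument words on this
   LP64 target, while a 4-byte SImode value rounds up to 1 word.  */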
/* Return the number of registers that should be skipped so the current
   argument (described by TYPE and WORDS) will be properly aligned.

   Integer and float arguments larger than 8 bytes start at the next
   even boundary.  Aggregates larger than 8 bytes start at the next
   even boundary if the aggregate has 16 byte alignment.  Note that
   in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
   but are still to be aligned in registers.

   ??? The ABI does not specify how to handle aggregates with
   alignment from 9 to 15 bytes, or greater than 16.  We handle them
   all as if they had 16 byte alignment.  Such aggregates can occur
   only if gcc extensions are used.  */

static int
ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
                          const_tree type, int words)
{
  /* No registers are skipped on VMS.  */
  if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
    return 0;

  if (type
      && TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != REAL_TYPE)
    return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
  else
    return words > 1;
}
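
/* Worked example (added commentary, not original source): with
   cum->words odd, a 16-byte-aligned aggregate returns 1 so that it
   starts on an even slot; when cum->words is already even, nothing is
   ever skipped.  */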
/* Return rtx for register where argument is passed, or zero if it is passed
   on the stack.  */
/* ??? 128-bit quad-precision floats are always passed in general
   registers.  */

static rtx
ia64_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
                     const_tree type, bool named, bool incoming)
{
  const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
  int words = ia64_function_arg_words (type, mode);
  int offset = ia64_function_arg_offset (cum, type, words);
  machine_mode hfa_mode = VOIDmode;

  /* For OPEN VMS, emit the instruction setting up the argument register here,
     when we know this will be together with the other arguments setup related
     insns.  This is not the conceptually best place to do this, but this is
     the easiest as we have convenient access to cumulative args info.  */

  if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
      && named == 1)
    {
      unsigned HOST_WIDE_INT regval = cum->words;
      int i;

      for (i = 0; i < 8; i++)
        regval |= ((int) cum->atypes[i]) << (i * 3 + 8);

      emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
                      GEN_INT (regval));
    }

  /* If all argument slots are used, then it must go on the stack.  */
  if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
    return 0;

  /* On OpenVMS argument is either in Rn or Fn.  */
  if (TARGET_ABI_OPEN_VMS)
    {
      if (FLOAT_MODE_P (mode))
        return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
      else
        return gen_rtx_REG (mode, basereg + cum->words);
    }

  /* Check for and handle homogeneous FP aggregates.  */
  if (type)
    hfa_mode = hfa_element_mode (type, 0);

  /* Unnamed prototyped hfas are passed as usual.  Named prototyped hfas
     and unprototyped hfas are passed specially.  */
  if (hfa_mode != VOIDmode && (! cum->prototype || named))
    {
      rtx loc[16];
      int i = 0;
      int fp_regs = cum->fp_regs;
      int int_regs = cum->words + offset;
      int hfa_size = GET_MODE_SIZE (hfa_mode);
      int byte_size;
      int args_byte_size;

      /* If prototyped, pass it in FR regs then GR regs.
         If not prototyped, pass it in both FR and GR regs.

         If this is an SFmode aggregate, then it is possible to run out of
         FR regs while GR regs are still left.  In that case, we pass the
         remaining part in the GR regs.  */

      /* Fill the FP regs.  We do this always.  We stop if we reach the end
         of the argument, the last FP register, or the last argument slot.  */

      byte_size = ((mode == BLKmode)
                   ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
      args_byte_size = int_regs * UNITS_PER_WORD;
      offset = 0;
      for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
              && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
        {
          loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
                                                              + fp_regs)),
                                      GEN_INT (offset));
          offset += hfa_size;
          args_byte_size += hfa_size;
          fp_regs++;
        }

      /* If no prototype, then the whole thing must go in GR regs.  */
      if (! cum->prototype)
        offset = 0;
      /* If this is an SFmode aggregate, then we might have some left over
         that needs to go in GR regs.  */
      else if (byte_size != offset)
        int_regs += offset / UNITS_PER_WORD;

      /* Fill in the GR regs.  We must use DImode here, not the hfa mode.  */

      for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
        {
          machine_mode gr_mode = DImode;
          unsigned int gr_size;

          /* If we have an odd 4 byte hunk because we ran out of FR regs,
             then this goes in a GR reg left adjusted/little endian, right
             adjusted/big endian.  */
          /* ??? Currently this is handled wrong, because 4-byte hunks are
             always right adjusted/little endian.  */
          if (offset & 0x4)
            gr_mode = SImode;
          /* If we have an even 4 byte hunk because the aggregate is a
             multiple of 4 bytes in size, then this goes in a GR reg right
             adjusted/little endian.  */
          else if (byte_size - offset == 4)
            gr_mode = SImode;

          loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (gr_mode, (basereg
                                                             + int_regs)),
                                      GEN_INT (offset));

          gr_size = GET_MODE_SIZE (gr_mode);
          offset += gr_size;
          if (gr_size == UNITS_PER_WORD
              || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
            int_regs++;
          else if (gr_size > UNITS_PER_WORD)
            int_regs += gr_size / UNITS_PER_WORD;
        }
      return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
    }

  /* Integral and aggregates go in general registers.  If we have run out of
     FR registers, then FP values must also go in general registers.  This can
     happen when we have a SFmode HFA.  */
  else if (mode == TFmode || mode == TCmode
           || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
    {
      int byte_size = ((mode == BLKmode)
                       ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
      if (BYTES_BIG_ENDIAN
          && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
          && byte_size < UNITS_PER_WORD
          && byte_size > 0)
        {
          rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode,
                                                       (basereg + cum->words
                                                        + offset)),
                                          const0_rtx);
          return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
        }
      else
        return gen_rtx_REG (mode, basereg + cum->words + offset);
    }

  /* If there is a prototype, then FP values go in a FR register when
     named, and in a GR register when unnamed.  */
  else if (cum->prototype)
    {
      if (named)
        return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
      /* In big-endian mode, an anonymous SFmode value must be represented
         as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
         the value into the high half of the general register.  */
      else if (BYTES_BIG_ENDIAN && mode == SFmode)
        return gen_rtx_PARALLEL (mode,
                 gen_rtvec (1,
                   gen_rtx_EXPR_LIST (VOIDmode,
                     gen_rtx_REG (DImode, basereg + cum->words + offset),
                                      const0_rtx)));
      else
        return gen_rtx_REG (mode, basereg + cum->words + offset);
    }
  /* If there is no prototype, then FP values go in both FR and GR
     registers.  */
  else
    {
      /* See comment above.  */
      machine_mode inner_mode =
        (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;

      rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (mode, (FR_ARG_FIRST
                                                          + cum->fp_regs)),
                                      const0_rtx);
      rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (inner_mode,
                                                   (basereg + cum->words
                                                    + offset)),
                                      const0_rtx);

      return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
    }
}
/* Implement TARGET_FUNCTION_ARG target hook.  */

static rtx
ia64_function_arg (cumulative_args_t cum, machine_mode mode,
                   const_tree type, bool named)
{
  return ia64_function_arg_1 (cum, mode, type, named, false);
}

/* Implement TARGET_FUNCTION_INCOMING_ARG target hook.  */

static rtx
ia64_function_incoming_arg (cumulative_args_t cum,
                            machine_mode mode,
                            const_tree type, bool named)
{
  return ia64_function_arg_1 (cum, mode, type, named, true);
}
/* Return number of bytes, at the beginning of the argument, that must be
   put in registers.  0 if the argument is entirely in registers or entirely
   in memory.  */

static int
ia64_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                        tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  int words = ia64_function_arg_words (type, mode);
  int offset = ia64_function_arg_offset (cum, type, words);

  /* If all argument slots are used, then it must go on the stack.  */
  if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
    return 0;

  /* It doesn't matter whether the argument goes in FR or GR regs.  If
     it fits within the 8 argument slots, then it goes entirely in
     registers.  If it extends past the last argument slot, then the rest
     goes on the stack.  */

  if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
    return 0;

  return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
}
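
/* Worked example (added commentary, not original source): with
   cum->words == 6 and a 3-word (24-byte) argument, 3 + 6 exceeds the 8
   argument slots, so (8 - 6) * 8 = 16 bytes are passed in registers and
   the remaining 8 bytes go on the stack.  */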
/* Return ivms_arg_type based on machine_mode.  */

static enum ivms_arg_type
ia64_arg_type (machine_mode mode)
{
  switch (mode)
    {
    case E_SFmode:
      return FS;
    case E_DFmode:
      return FT;
    default:
      return I64;
    }
}
/* Update CUM to point after this argument.  This is patterned after
   ia64_function_arg.  */

static void
ia64_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
                           const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int words = ia64_function_arg_words (type, mode);
  int offset = ia64_function_arg_offset (cum, type, words);
  machine_mode hfa_mode = VOIDmode;

  /* If all arg slots are already full, then there is nothing to do.  */
  if (cum->words >= MAX_ARGUMENT_SLOTS)
    {
      cum->words += words + offset;
      return;
    }

  cum->atypes[cum->words] = ia64_arg_type (mode);
  cum->words += words + offset;

  /* On OpenVMS argument is either in Rn or Fn.  */
  if (TARGET_ABI_OPEN_VMS)
    {
      cum->int_regs = cum->words;
      cum->fp_regs = cum->words;
      return;
    }

  /* Check for and handle homogeneous FP aggregates.  */
  if (type)
    hfa_mode = hfa_element_mode (type, 0);

  /* Unnamed prototyped hfas are passed as usual.  Named prototyped hfas
     and unprototyped hfas are passed specially.  */
  if (hfa_mode != VOIDmode && (! cum->prototype || named))
    {
      int fp_regs = cum->fp_regs;
      /* This is the original value of cum->words + offset.  */
      int int_regs = cum->words - words;
      int hfa_size = GET_MODE_SIZE (hfa_mode);
      int byte_size;
      int args_byte_size;

      /* If prototyped, pass it in FR regs then GR regs.
         If not prototyped, pass it in both FR and GR regs.

         If this is an SFmode aggregate, then it is possible to run out of
         FR regs while GR regs are still left.  In that case, we pass the
         remaining part in the GR regs.  */

      /* Fill the FP regs.  We do this always.  We stop if we reach the end
         of the argument, the last FP register, or the last argument slot.  */

      byte_size = ((mode == BLKmode)
                   ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
      args_byte_size = int_regs * UNITS_PER_WORD;
      offset = 0;
      for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
              && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
        {
          offset += hfa_size;
          args_byte_size += hfa_size;
          fp_regs++;
        }

      cum->fp_regs = fp_regs;
    }

  /* Integral and aggregates go in general registers.  So do TFmode FP values.
     If we have run out of FR registers, then other FP values must also go in
     general registers.  This can happen when we have a SFmode HFA.  */
  else if (mode == TFmode || mode == TCmode
           || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
    cum->int_regs = cum->words;

  /* If there is a prototype, then FP values go in a FR register when
     named, and in a GR register when unnamed.  */
  else if (cum->prototype)
    {
      if (! named)
        cum->int_regs = cum->words;
      else
        /* ??? Complex types should not reach here.  */
        cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
    }
  /* If there is no prototype, then FP values go in both FR and GR
     registers.  */
  else
    {
      /* ??? Complex types should not reach here.  */
      cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
      cum->int_regs = cum->words;
    }
}
/* Arguments with alignment larger than 8 bytes start at the next even
   boundary.  On ILP32 HPUX, TFmode arguments start on next even boundary
   even though their normal alignment is 8 bytes.  See ia64_function_arg.  */

static unsigned int
ia64_function_arg_boundary (machine_mode mode, const_tree type)
{
  if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
    return PARM_BOUNDARY * 2;

  if (type)
    {
      if (TYPE_ALIGN (type) > PARM_BOUNDARY)
        return PARM_BOUNDARY * 2;
      else
        return PARM_BOUNDARY;
    }

  if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
    return PARM_BOUNDARY * 2;
  else
    return PARM_BOUNDARY;
}
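
/* For instance (added commentary, not original source): an aggregate
   declared with 16-byte alignment gets PARM_BOUNDARY * 2 (128 bits on
   this target), as does a TFmode argument on ILP32 HP-UX even though
   TFmode only has 8-byte alignment there.  */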
/* True if it is OK to do sibling call optimization for the specified
   call expression EXP.  DECL will be the called function, or NULL if
   this is an indirect call.  */

static bool
ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* We can't perform a sibcall if the current function has the syscall_linkage
     attribute.  */
  if (lookup_attribute ("syscall_linkage",
                        TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    return false;

  /* We must always return with our current GP.  This means we can
     only sibcall to functions defined in the current module unless
     TARGET_CONST_GP is set to true.  */
  return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
}
/* Implement va_arg.  */

static tree
ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                      gimple_seq *post_p)
{
  /* Variable sized types are passed by reference.  */
  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      tree ptrtype = build_pointer_type (type);
      tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (addr);
    }

  /* Aggregate arguments with alignment larger than 8 bytes start at
     the next even boundary.  Integer and floating point arguments
     do so if they are larger than 8 bytes, whether or not they are
     also aligned larger than 8 bytes.  */
  if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
      ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
    {
      tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
                  build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
      gimplify_assign (unshare_expr (valist), t, pre_p);
    }

  return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
}
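
/* The gimple built above amounts to (added commentary, not original
   source): valist = (valist + 15) & -16, i.e. rounding the argument
   pointer up to the next 2 * UNITS_PER_WORD (16-byte) boundary before
   an over-aligned or over-sized argument is fetched.  */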
/* Return 1 if the function return value is returned in memory.  Return 0
   if it is in a register.  */

static bool
ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  machine_mode hfa_mode;
  HOST_WIDE_INT byte_size;

  mode = TYPE_MODE (valtype);
  byte_size = GET_MODE_SIZE (mode);
  if (mode == BLKmode)
    {
      byte_size = int_size_in_bytes (valtype);
      if (byte_size < 0)
        return true;
    }

  /* Hfa's with up to 8 elements are returned in the FP argument registers.  */

  hfa_mode = hfa_element_mode (valtype, 0);
  if (hfa_mode != VOIDmode)
    {
      int hfa_size = GET_MODE_SIZE (hfa_mode);

      if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
        return true;
      else
        return false;
    }
  else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
    return true;
  else
    return false;
}
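
/* Illustrative examples (added commentary, not original source): a
   struct of three doubles is a DFmode HFA with 3 <= 8 elements, so it
   comes back in FP registers; a 40-byte non-HFA aggregate exceeds
   UNITS_PER_WORD * MAX_INT_RETURN_SLOTS (32 bytes, assuming the usual
   MAX_INT_RETURN_SLOTS of 4) and is forced into memory.  */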
/* Return rtx for register that holds the function return value.  */

static rtx
ia64_function_value (const_tree valtype,
                     const_tree fn_decl_or_type,
                     bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  machine_mode hfa_mode;
  int unsignedp;
  const_tree func = fn_decl_or_type;

  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    func = NULL;

  mode = TYPE_MODE (valtype);
  hfa_mode = hfa_element_mode (valtype, 0);

  if (hfa_mode != VOIDmode)
    {
      rtx loc[8];
      int i;
      int hfa_size;
      int byte_size;
      int offset;

      hfa_size = GET_MODE_SIZE (hfa_mode);
      byte_size = ((mode == BLKmode)
                   ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
      offset = 0;
      for (i = 0; offset < byte_size; i++)
        {
          loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
                                      GEN_INT (offset));
          offset += hfa_size;
        }
      return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
    }
  else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
    return gen_rtx_REG (mode, FR_ARG_FIRST);
  else
    {
      bool need_parallel = false;

      /* In big-endian mode, we need to manage the layout of aggregates
         in the registers so that we get the bits properly aligned in
         the highpart of the registers.  */
      if (BYTES_BIG_ENDIAN
          && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
        need_parallel = true;

      /* Something like struct S { long double x; char a[0] } is not an
         HFA structure, and therefore doesn't go in fp registers.  But
         the middle-end will give it XFmode anyway, and XFmode values
         don't normally fit in integer registers.  So we need to smuggle
         the value inside a parallel.  */
      else if (mode == XFmode || mode == XCmode || mode == RFmode)
        need_parallel = true;

      if (need_parallel)
        {
          rtx loc[8];
          int offset;
          int bytesize;
          int i;

          offset = 0;
          bytesize = int_size_in_bytes (valtype);
          /* An empty PARALLEL is invalid here, but the return value
             doesn't matter for empty structs.  */
          if (bytesize == 0)
            return gen_rtx_REG (mode, GR_RET_FIRST);
          for (i = 0; offset < bytesize; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode,
                                                       GR_RET_FIRST + i),
                                          GEN_INT (offset));
              offset += UNITS_PER_WORD;
            }
          return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
        }

      mode = promote_function_mode (valtype, mode, &unsignedp,
                                    func ? TREE_TYPE (func) : NULL_TREE,
                                    true);

      return gen_rtx_REG (mode, GR_RET_FIRST);
    }
}
/* Worker function for TARGET_LIBCALL_VALUE.  */

static rtx
ia64_libcall_value (machine_mode mode,
                    const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode,
                      (((GET_MODE_CLASS (mode) == MODE_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
                        && (mode) != TFmode)
                       ? FR_RET_FIRST : GR_RET_FIRST));
}
/* Worker function for FUNCTION_VALUE_REGNO_P.  */

static bool
ia64_function_value_regno_p (const unsigned int regno)
{
  return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
          || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  gcc_assert (size == 4 || size == 8);
  if (size == 4)
    fputs ("\tdata4.ua\t@dtprel(", file);
  else
    fputs ("\tdata8.ua\t@dtprel(", file);
  output_addr_const (file, x);
  fputs (")", file);
}
/* Print a memory address as an operand to reference that memory location.  */

/* ??? Do we need this?  It gets used only for 'a' operands.  We could perhaps
   also call this from ia64_print_operand for memory addresses.  */

static void
ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
                            machine_mode /*mode*/,
                            rtx address ATTRIBUTE_UNUSED)
{
}
/* Print an operand to an assembler instruction.
   C    Swap and print a comparison operator.
   D    Print an FP comparison operator.
   E    Print 32 - constant, for SImode shifts as extract.
   e    Print 64 - constant, for DImode rotates.
   F    A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
        a floating point register emitted normally.
   G    A floating point constant.
   I    Invert a predicate register by adding 1.
   J    Select the proper predicate register for a condition.
   j    Select the inverse predicate register for a condition.
   O    Append .acq for volatile load.
   P    Postincrement of a MEM.
   Q    Append .rel for volatile store.
   R    Print .s .d or nothing for a single, double or no truncation.
   S    Shift amount for shladd instruction.
   T    Print an 8-bit sign extended number (K) as a 32-bit unsigned number
        for Intel assembler.
   U    Print an 8-bit sign extended number (K) as a 64-bit unsigned number
        for Intel assembler.
   X    A pair of floating point registers.
   r    Print register name, or constant 0 as r0.  HP compatibility for
        Linux kernel.
   v    Print vector constant value as an 8-byte integer value.  */

static void
ia64_print_operand (FILE * file, rtx x, int code)
{
  const char *str;

  switch (code)
    {
    case 0:
      /* Handled below.  */
      break;

    case 'C':
      {
        enum rtx_code c = swap_condition (GET_CODE (x));
        fputs (GET_RTX_NAME (c), file);
        return;
      }

    case 'D':
      switch (GET_CODE (x))
        {
        case NE:
          str = "neq";
          break;
        case UNORDERED:
          str = "unord";
          break;
        case ORDERED:
          str = "ord";
          break;
        case UNLT:
          str = "nge";
          break;
        case UNLE:
          str = "ngt";
          break;
        case UNGT:
          str = "nle";
          break;
        case UNGE:
          str = "nlt";
          break;
        case UNEQ:
        case LTGT:
          gcc_unreachable ();
        default:
          str = GET_RTX_NAME (GET_CODE (x));
          break;
        }
      fputs (str, file);
      return;

    case 'E':
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
      return;

    case 'e':
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
      return;

    case 'F':
      if (x == CONST0_RTX (GET_MODE (x)))
        str = reg_names [FR_REG (0)];
      else if (x == CONST1_RTX (GET_MODE (x)))
        str = reg_names [FR_REG (1)];
      else
        {
          gcc_assert (GET_CODE (x) == REG);
          str = reg_names [REGNO (x)];
        }
      fputs (str, file);
      return;

    case 'G':
      {
        long val[4];
        real_to_target (val, CONST_DOUBLE_REAL_VALUE (x), GET_MODE (x));
        if (GET_MODE (x) == SFmode)
          fprintf (file, "0x%08lx", val[0] & 0xffffffff);
        else if (GET_MODE (x) == DFmode)
          fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
                                          & 0xffffffff,
                                         (WORDS_BIG_ENDIAN ? val[1] : val[0])
                                          & 0xffffffff);
        else
          output_operand_lossage ("invalid %%G mode");
      }
      return;

    case 'I':
      fputs (reg_names [REGNO (x) + 1], file);
      return;

    case 'J':
    case 'j':
      {
        unsigned int regno = REGNO (XEXP (x, 0));
        if (GET_CODE (x) == EQ)
          regno += 1;
        if (code == 'j')
          regno ^= 1;
        fputs (reg_names [regno], file);
      }
      return;

    case 'O':
      if (MEM_VOLATILE_P (x))
        fputs(".acq", file);
      return;

    case 'P':
      {
        HOST_WIDE_INT value;

        switch (GET_CODE (XEXP (x, 0)))
          {
          default:
            return;

          case POST_MODIFY:
            x = XEXP (XEXP (XEXP (x, 0), 1), 1);
            if (GET_CODE (x) == CONST_INT)
              value = INTVAL (x);
            else
              {
                gcc_assert (GET_CODE (x) == REG);
                fprintf (file, ", %s", reg_names[REGNO (x)]);
                return;
              }
            break;

          case POST_INC:
            value = GET_MODE_SIZE (GET_MODE (x));
            break;

          case POST_DEC:
            value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
            break;
          }

        fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
        return;
      }

    case 'Q':
      if (MEM_VOLATILE_P (x))
        fputs(".rel", file);
      return;

    case 'R':
      if (x == CONST0_RTX (GET_MODE (x)))
        fputs(".s", file);
      else if (x == CONST1_RTX (GET_MODE (x)))
        fputs(".d", file);
      else if (x == CONST2_RTX (GET_MODE (x)))
        ;
      else
        output_operand_lossage ("invalid %%R value");
      return;

    case 'S':
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;

    case 'T':
      if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
        {
          fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
          return;
        }
      break;

    case 'U':
      if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
        {
          const char *prefix = "0x";
          if (INTVAL (x) & 0x80000000)
            {
              fprintf (file, "0xffffffff");
              prefix = "";
            }
          fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
          return;
        }
      break;

    case 'X':
      {
        unsigned int regno = REGNO (x);
        fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
      }
      return;

    case 'r':
      /* If this operand is the constant zero, write it as register zero.
         Any register, zero, or CONST_INT value is OK here.  */
      if (GET_CODE (x) == REG)
        fputs (reg_names[REGNO (x)], file);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fputs ("r0", file);
      else if (GET_CODE (x) == CONST_INT)
        output_addr_const (file, x);
      else
        output_operand_lossage ("invalid %%r value");
      return;

    case 'v':
      gcc_assert (GET_CODE (x) == CONST_VECTOR);
      x = simplify_subreg (DImode, x, GET_MODE (x), 0);
      break;

    case '+':
      {
        const char *which;

        /* For conditional branches, returns or calls, substitute
           sptk, dptk, dpnt, or spnt for %s.  */
        x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
        if (x)
          {
            int pred_val = profile_probability::from_reg_br_prob_note
                                 (XINT (x, 0)).to_reg_br_prob_base ();

            /* Guess top and bottom 10% statically predicted.  */
            if (pred_val < REG_BR_PROB_BASE / 50
                && br_prob_note_reliable_p (x))
              which = ".spnt";
            else if (pred_val < REG_BR_PROB_BASE / 2)
              which = ".dpnt";
            else if (pred_val < REG_BR_PROB_BASE / 100 * 98
                     || !br_prob_note_reliable_p (x))
              which = ".dptk";
            else
              which = ".sptk";
          }
        else if (CALL_P (current_output_insn))
          which = ".sptk";
        else
          which = ".dptk";

        fputs (which, file);
        return;
      }

    case ',':
      x = current_insn_predicate;
      if (x)
        {
          unsigned int regno = REGNO (XEXP (x, 0));
          if (GET_CODE (x) == EQ)
            regno += 1;
          fprintf (file, "(%s) ", reg_names [regno]);
        }
      return;

    default:
      output_operand_lossage ("ia64_print_operand: unknown code");
      return;
    }

  switch (GET_CODE (x))
    {
      /* This happens for the spill/restore instructions.  */
    case POST_INC:
    case POST_DEC:
    case POST_MODIFY:
      x = XEXP (x, 0);
      /* fall through */

    case REG:
      fputs (reg_names [REGNO (x)], file);
      break;

    case MEM:
      {
        rtx addr = XEXP (x, 0);
        if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
          addr = XEXP (addr, 0);
        fprintf (file, "[%s]", reg_names [REGNO (addr)]);
        break;
      }

    default:
      output_addr_const (file, x);
      break;
    }

  return;
}
/* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P.  */

static bool
ia64_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '+' || code == ',');
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */
/* ??? This is incomplete.  */

static bool
ia64_rtx_costs (rtx x, machine_mode mode, int outer_code,
                int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      switch (outer_code)
        {
        case SET:
          *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
          return true;
        case PLUS:
          if (satisfies_constraint_I (x))
            *total = 0;
          else if (satisfies_constraint_J (x))
            *total = 1;
          else
            *total = COSTS_N_INSNS (1);
          return true;
        default:
          if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
            *total = 0;
          else
            *total = COSTS_N_INSNS (1);
          return true;
        }

    case CONST_DOUBLE:
      *total = COSTS_N_INSNS (1);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      *total = COSTS_N_INSNS (3);
      return true;

    case FMA:
      *total = COSTS_N_INSNS (4);
      return true;

    case MULT:
      /* For multiplies wider than HImode, we have to go to the FPU,
         which normally involves copies.  Plus there's the latency
         of the multiply itself, and the latency of the instructions to
         transfer integer regs to FP regs.  */
      if (FLOAT_MODE_P (mode))
        *total = COSTS_N_INSNS (4);
      else if (GET_MODE_SIZE (mode) > 2)
        *total = COSTS_N_INSNS (10);
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
        {
          *total = COSTS_N_INSNS (4);
          return true;
        }
      /* FALLTHRU */

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* We make divide expensive, so that divide-by-constant will be
         optimized to a multiply.  */
      *total = COSTS_N_INSNS (60);
      return true;

    default:
      return false;
    }
}
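
/* Effect of the high divide cost (added commentary, not original
   source): pricing DIV/MOD at 60 insns steers the middle-end toward
   expanding e.g. x / 7 as a multiply by a magic reciprocal constant
   plus shifts instead of a runtime division.  */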
/* Calculate the cost of moving data from a register in class FROM to
   one in class TO, using MODE.  */

static int
ia64_register_move_cost (machine_mode mode, reg_class_t from,
                         reg_class_t to)
{
  /* ADDL_REGS is the same as GR_REGS for movement purposes.  */
  if (to == ADDL_REGS)
    to = GR_REGS;
  if (from == ADDL_REGS)
    from = GR_REGS;

  /* All costs are symmetric, so reduce cases by putting the
     lower number class as the destination.  */
  if (from < to)
    {
      reg_class_t tmp = to;
      to = from, from = tmp;
    }

  /* Moving from FR<->GR in XFmode must be more expensive than 2,
     so that we get secondary memory reloads.  Between FR_REGS,
     we have to make this at least as expensive as memory_move_cost
     to avoid spectacularly poor register class preferencing.  */
  if (mode == XFmode || mode == RFmode)
    {
      if (to != GR_REGS || from != GR_REGS)
        return memory_move_cost (mode, to, false);
      else
        return 3;
    }

  switch (to)
    {
    case PR_REGS:
      /* Moving between PR registers takes two insns.  */
      if (from == PR_REGS)
        return 3;
      /* Moving between PR and anything but GR is impossible.  */
      if (from != GR_REGS)
        return memory_move_cost (mode, to, false);
      break;

    case BR_REGS:
      /* Moving between BR and anything but GR is impossible.  */
      if (from != GR_REGS && from != GR_AND_BR_REGS)
        return memory_move_cost (mode, to, false);
      break;

    case AR_I_REGS:
    case AR_M_REGS:
      /* Moving between AR and anything but GR is impossible.  */
      if (from != GR_REGS)
        return memory_move_cost (mode, to, false);
      break;

    case GR_REGS:
    case FR_REGS:
    case FP_REGS:
    case GR_AND_FR_REGS:
    case GR_AND_BR_REGS:
    case ALL_REGS:
      break;

    default:
      gcc_unreachable ();
    }

  return 2;
}
/* Calculate the cost of moving data of MODE from a register to or from
   memory.  */

static int
ia64_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                       reg_class_t rclass,
                       bool in ATTRIBUTE_UNUSED)
{
  if (rclass == GENERAL_REGS
      || rclass == FR_REGS
      || rclass == FP_REGS
      || rclass == GR_AND_FR_REGS)
    return 4;
  else
    return 10;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  Place additional restrictions
   on RCLASS to use when copying X into that class.  */

static reg_class_t
ia64_preferred_reload_class (rtx x, reg_class_t rclass)
{
  switch (rclass)
    {
    case FR_REGS:
    case FP_REGS:
      /* Don't allow volatile mem reloads into floating point registers.
         This is defined to force reload to choose the r/m case instead
         of the f/f case when reloading (set (reg fX) (mem/v)).  */
      if (MEM_P (x) && MEM_VOLATILE_P (x))
        return NO_REGS;

      /* Force all unrecognized constants into the constant pool.  */
      if (CONSTANT_P (x))
        return NO_REGS;
      break;

    default:
      break;
    }

  return rclass;
}
/* This function returns the register class required for a secondary
   register when copying between one of the registers in RCLASS, and X,
   using MODE.  A return value of NO_REGS means that no secondary register
   is required.  */

enum reg_class
ia64_secondary_reload_class (enum reg_class rclass,
                             machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  int regno = -1;

  if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  switch (rclass)
    {
    case BR_REGS:
    case AR_M_REGS:
    case AR_I_REGS:
      /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
         interaction.  We end up with two pseudos with overlapping lifetimes
         both of which are equiv to the same constant, and both which need
         to be in BR_REGS.  This seems to be a cse bug.  cse_basic_block_end
         changes depending on the path length, which means the qty_first_reg
         check in make_regs_eqv can give different answers at different times.
         At some point I'll probably need a reload_indi pattern to handle
         this.

         We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
         wound up with a FP register from GR_AND_FR_REGS.  Extend that to all
         non-general registers for good measure.  */
      if (regno >= 0 && ! GENERAL_REGNO_P (regno))
        return GR_REGS;

      /* This is needed if a pseudo used as a call_operand gets spilled to a
         stack slot.  */
      if (GET_CODE (x) == MEM)
        return GR_REGS;
      break;

    case FR_REGS:
    case FP_REGS:
      /* Need to go through general registers to get to other class regs.  */
      if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
        return GR_REGS;

      /* This can happen when a paradoxical subreg is an operand to the
         muldi3 pattern.  */
      /* ??? This shouldn't be necessary after instruction scheduling is
         enabled, because paradoxical subregs are not accepted by
         register_operand when INSN_SCHEDULING is defined.  Or alternatively,
         stop the paradoxical subreg stupidity in the *_operand functions
         in recog.c.  */
      if (GET_CODE (x) == MEM
          && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
              || GET_MODE (x) == QImode))
        return GR_REGS;

      /* This can happen because of the ior/and/etc patterns that accept FP
         registers as operands.  If the third operand is a constant, then it
         needs to be reloaded into a FP register.  */
      if (GET_CODE (x) == CONST_INT)
        return GR_REGS;

      /* This can happen because of register elimination in a muldi3 insn.
         E.g. `26107 * (unsigned long)&u'.  */
      if (GET_CODE (x) == PLUS)
        return GR_REGS;
      break;

    case PR_REGS:
      /* ??? This happens if we cse/gcse a BImode value across a call,
         and the function has a nonlocal goto.  This is because global
         does not allocate call crossing pseudos to hard registers when
         crtl->has_nonlocal_goto is true.  This is relatively
         common for C++ programs that use exceptions.  To reproduce,
         return NO_REGS and compile libstdc++.  */
      if (GET_CODE (x) == MEM)
        return GR_REGS;

      /* This can happen when we take a BImode subreg of a DImode value,
         and that DImode value winds up in some non-GR register.  */
      if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
        return GR_REGS;
      break;

    default:
      break;
    }

  return NO_REGS;
}
/* Implement targetm.unspec_may_trap_p hook.  */

static int
ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
{
  switch (XINT (x, 1))
    {
    case UNSPEC_LDA:
    case UNSPEC_LDS:
    case UNSPEC_LDSA:
    case UNSPEC_LDCCLR:
    case UNSPEC_CHKACLR:
    case UNSPEC_CHKS:
      /* These unspecs are just wrappers.  */
      return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
    }

  return default_unspec_may_trap_p (x, flags);
}
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use f32-f127.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }
}
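
/* Example usage (added commentary, not original source):
   -mfixed-range=f32-f127 reserves the upper FP registers, and several
   ranges may be chained with commas, e.g. -mfixed-range=f32-f127,r13-r13.  */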
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
ia64_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) ia64_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  if (TARGET_AUTO_PIC)
    target_flags |= MASK_CONST_GP;

  /* Numerous experiments show that IRA based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3)
    flag_ira_loop_pressure = 1;

  ia64_section_threshold = (global_options_set.x_g_switch_value
                            ? g_switch_value
                            : IA64_DEFAULT_GVALUE);

  init_machine_status = ia64_init_machine_status;

  if (flag_align_functions && !str_align_functions)
    str_align_functions = "64";
  if (flag_align_loops && !str_align_loops)
    str_align_loops = "32";
  if (TARGET_ABI_OPEN_VMS)
    flag_no_common = 1;

  ia64_override_options_after_change();
}
/* Implement targetm.override_options_after_change.  */

static void
ia64_override_options_after_change (void)
{
  if (optimize >= 3
      && !global_options_set.x_flag_selective_scheduling
      && !global_options_set.x_flag_selective_scheduling2)
    {
      flag_selective_scheduling2 = 1;
      flag_sel_sched_pipelining = 1;
    }
  if (mflag_sched_control_spec == 2)
    {
      /* Control speculation is on by default for the selective scheduler,
         but not for the Haifa scheduler.  */
      mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
    }
  if (flag_sel_sched_pipelining && flag_auto_inc_dec)
    {
      /* FIXME: remove this when we'd implement breaking autoinsns as
         a transformation.  */
      flag_auto_inc_dec = 0;
    }
}
/* Initialize the record of emitted frame related registers.  */

void ia64_init_expanders (void)
{
  memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
}

static struct machine_function *
ia64_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
static enum attr_type ia64_safe_type (rtx_insn *);

static enum attr_itanium_class
ia64_safe_itanium_class (rtx_insn *insn)
{
  if (recog_memoized (insn) >= 0)
    return get_attr_itanium_class (insn);
  else if (DEBUG_INSN_P (insn))
    return ITANIUM_CLASS_IGNORE;
  else
    return ITANIUM_CLASS_UNKNOWN;
}

static enum attr_type
ia64_safe_type (rtx_insn *insn)
{
  if (recog_memoized (insn) >= 0)
    return get_attr_type (insn);
  else
    return TYPE_UNKNOWN;
}
/* The following collection of routines emit instruction group stop bits as
   necessary to avoid dependencies.  */

/* Need to track some additional registers as far as serialization is
   concerned so we can properly handle br.call and br.ret.  We could
   make these registers visible to gcc, but since these registers are
   never explicitly used in gcc generated code, it seems wasteful to
   do so (plus it would make the call and return patterns needlessly
   complex).  */
#define REG_RP          (BR_REG (0))
#define REG_AR_CFM      (FIRST_PSEUDO_REGISTER + 1)
/* This is used for volatile asms which may require a stop bit immediately
   before and after them.  */
#define REG_VOLATILE    (FIRST_PSEUDO_REGISTER + 2)
#define AR_UNAT_BIT_0   (FIRST_PSEUDO_REGISTER + 3)
#define NUM_REGS        (AR_UNAT_BIT_0 + 64)
6199 /* For each register, we keep track of how it has been written in the
6200 current instruction group.
6202 If a register is written unconditionally (no qualifying predicate),
6203 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
6205 If a register is written if its qualifying predicate P is true, we
6206 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
6207 may be written again by the complement of P (P^1) and when this happens,
6208 WRITE_COUNT gets set to 2.
6210 The result of this is that whenever an insn attempts to write a register
6211 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
6213 If a predicate register is written by a floating-point insn, we set
6214 WRITTEN_BY_FP to true.
6216 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
6217 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
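
/* A worked example of the scheme above (an illustrative addition, not part
   of the original sources): assuming p6 and p7 hold complementary
   predicates, within one instruction group

	(p6) mov r14 = r15     WRITE_COUNT (r14): 0 -> 1, FIRST_PRED = p6
	(p7) mov r14 = r16     complement of p6 also writes: 1 -> 2
	     mov r14 = r17     WRITE_COUNT is already 2: stop bit required

   the third write hits a register whose WRITE_COUNT is two, so an insn
   group barrier must be emitted before it; that is what the routines
   below report through their return values.  */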
#if GCC_VERSION >= 4000
#define RWS_FIELD_TYPE __extension__ unsigned short
#else
#define RWS_FIELD_TYPE unsigned int
#endif
struct reg_write_state
{
  RWS_FIELD_TYPE write_count : 2;
  RWS_FIELD_TYPE first_pred : 10;
  RWS_FIELD_TYPE written_by_fp : 1;
  RWS_FIELD_TYPE written_by_and : 1;
  RWS_FIELD_TYPE written_by_or : 1;
};

/* Cumulative info for the current instruction group.  */
struct reg_write_state rws_sum[NUM_REGS];

#if CHECKING_P
/* Bitmap whether a register has been written in the current insn.  */
HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
			   / HOST_BITS_PER_WIDEST_FAST_INT];
static inline void
rws_insn_set (int regno)
{
  gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
  SET_HARD_REG_BIT (rws_insn, regno);
}

static inline int
rws_insn_test (int regno)
{
  return TEST_HARD_REG_BIT (rws_insn, regno);
}

#else
/* When not checking, track just REG_AR_CFM and REG_VOLATILE.  */
unsigned char rws_insn[2];

static inline void
rws_insn_set (int regno)
{
  if (regno == REG_AR_CFM)
    rws_insn[0] = 1;
  else if (regno == REG_VOLATILE)
    rws_insn[1] = 1;
}

static inline int
rws_insn_test (int regno)
{
  if (regno == REG_AR_CFM)
    return rws_insn[0];
  if (regno == REG_VOLATILE)
    return rws_insn[1];
  return 0;
}
#endif
/* Indicates whether this is the first instruction after a stop bit,
   in which case we don't need another stop bit.  Without this,
   ia64_variable_issue will die when scheduling an alloc.  */
static int first_instruction;

/* Misc flags needed to compute RAW/WAW dependencies while we are traversing
   RTL for one instruction.  */
struct reg_flags
{
  unsigned int is_write : 1;	/* Is register being written?  */
  unsigned int is_fp : 1;	/* Is register used as part of an fp op?  */
  unsigned int is_branch : 1;	/* Is register used as part of a branch?  */
  unsigned int is_and : 1;	/* Is register used as part of and.orcm?  */
  unsigned int is_or : 1;	/* Is register used as part of or.andcm?  */
  unsigned int is_sibcall : 1;	/* Is this a sibling or normal call?  */
};

static void rws_update (int, struct reg_flags, int);
static int rws_access_regno (int, struct reg_flags, int);
static int rws_access_reg (rtx, struct reg_flags, int);
static void update_set_flags (rtx, struct reg_flags *);
static int set_src_needs_barrier (rtx, struct reg_flags, int);
static int rtx_needs_barrier (rtx, struct reg_flags, int);
static void init_insn_group_barriers (void);
static int group_barrier_needed (rtx_insn *);
static int safe_group_barrier_needed (rtx_insn *);
static int in_safe_group_barrier;
/* Update *RWS for REGNO, which is being written by the current instruction,
   with predicate PRED, and associated register flags in FLAGS.  */

static void
rws_update (int regno, struct reg_flags flags, int pred)
{
  if (pred)
    rws_sum[regno].write_count++;
  else
    rws_sum[regno].write_count = 2;
  rws_sum[regno].written_by_fp |= flags.is_fp;
  /* ??? Not tracking and/or across differing predicates.  */
  rws_sum[regno].written_by_and = flags.is_and;
  rws_sum[regno].written_by_or = flags.is_or;
  rws_sum[regno].first_pred = pred;
}
/* Handle an access to register REGNO of type FLAGS using predicate register
   PRED.  Update rws_sum array.  Return 1 if this access creates
   a dependency with an earlier instruction in the same group.  */

static int
rws_access_regno (int regno, struct reg_flags flags, int pred)
{
  int need_barrier = 0;

  gcc_assert (regno < NUM_REGS);

  if (! PR_REGNO_P (regno))
    flags.is_and = flags.is_or = 0;

  if (flags.is_write)
    {
      int write_count;

      rws_insn_set (regno);
      write_count = rws_sum[regno].write_count;

      switch (write_count)
	{
	case 0:
	  /* The register has not been written yet.  */
	  if (!in_safe_group_barrier)
	    rws_update (regno, flags, pred);
	  break;

	case 1:
	  /* The register has been written via a predicate.  Treat
	     it like an unconditional write and do not try to check
	     for complementary pred reg in earlier write.  */
	  if (flags.is_and && rws_sum[regno].written_by_and)
	    ;
	  else if (flags.is_or && rws_sum[regno].written_by_or)
	    ;
	  else
	    need_barrier = 1;
	  if (!in_safe_group_barrier)
	    rws_update (regno, flags, pred);
	  break;

	case 2:
	  /* The register has been unconditionally written already.  We
	     need a barrier.  */
	  if (flags.is_and && rws_sum[regno].written_by_and)
	    ;
	  else if (flags.is_or && rws_sum[regno].written_by_or)
	    ;
	  else
	    need_barrier = 1;
	  if (!in_safe_group_barrier)
	    {
	      rws_sum[regno].written_by_and = flags.is_and;
	      rws_sum[regno].written_by_or = flags.is_or;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      if (flags.is_branch)
	{
	  /* Branches have several RAW exceptions that allow us to avoid
	     barriers.  */

	  if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
	    /* RAW dependencies on branch regs are permissible as long
	       as the writer is a non-branch instruction.  Since we
	       never generate code that uses a branch register written
	       by a branch instruction, handling this case is
	       easy.  */
	    return 0;

	  if (REGNO_REG_CLASS (regno) == PR_REGS
	      && ! rws_sum[regno].written_by_fp)
	    /* The predicates of a branch are available within the
	       same insn group as long as the predicate was written by
	       something other than a floating-point instruction.  */
	    return 0;
	}

      if (flags.is_and && rws_sum[regno].written_by_and)
	return 0;
      if (flags.is_or && rws_sum[regno].written_by_or)
	return 0;

      switch (rws_sum[regno].write_count)
	{
	case 0:
	  /* The register has not been written yet.  */
	  break;

	case 1:
	  /* The register has been written via a predicate, assume we
	     need a barrier (don't check for complementary regs).  */
	  need_barrier = 1;
	  break;

	case 2:
	  /* The register has been unconditionally written already.  We
	     need a barrier.  */
	  need_barrier = 1;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  return need_barrier;
}
static int
rws_access_reg (rtx reg, struct reg_flags flags, int pred)
{
  int regno = REGNO (reg);
  int n = REG_NREGS (reg);

  if (n == 1)
    return rws_access_regno (regno, flags, pred);
  else
    {
      int need_barrier = 0;
      while (--n >= 0)
	need_barrier |= rws_access_regno (regno + n, flags, pred);
      return need_barrier;
    }
}
/* Examine X, which is a SET rtx, and update the flags, the predicate, and
   the condition, stored in *PFLAGS, *PPRED and *PCOND.  */

static void
update_set_flags (rtx x, struct reg_flags *pflags)
{
  rtx src = SET_SRC (x);

  switch (GET_CODE (src))
    {
    case CALL:
      return;

    case IF_THEN_ELSE:
      /* There are four cases here:
	 (1) The destination is (pc), in which case this is a branch,
	 nothing here applies.
	 (2) The destination is ar.lc, in which case this is a
	 doloop_end_internal,
	 (3) The destination is an fp register, in which case this is
	 an fselect instruction.
	 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
	 this is a check load.
	 In all cases, nothing we do in this function applies.  */
      return;

    default:
      if (COMPARISON_P (src)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
	/* Set pflags->is_fp to 1 so that we know we're dealing
	   with a floating point comparison when processing the
	   destination of the SET.  */
	pflags->is_fp = 1;

      /* Discover if this is a parallel comparison.  We only handle
	 and.orcm and or.andcm at present, since we must retain a
	 strict inverse on the predicate pair.  */
      else if (GET_CODE (src) == AND)
	pflags->is_and = 1;
      else if (GET_CODE (src) == IOR)
	pflags->is_or = 1;

      break;
    }
}
/* Subroutine of rtx_needs_barrier; this function determines whether the
   source of a given SET rtx found in X needs a barrier.  FLAGS and PRED
   are as in rtx_needs_barrier.  COND is an rtx that holds the condition
   for this insn.  */

static int
set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
{
  int need_barrier = 0;
  rtx dst;
  rtx src = SET_SRC (x);

  if (GET_CODE (src) == CALL)
    /* We don't need to worry about the result registers that
       get written by subroutine call.  */
    return rtx_needs_barrier (src, flags, pred);
  else if (SET_DEST (x) == pc_rtx)
    {
      /* X is a conditional branch.  */
      /* ??? This seems redundant, as the caller sets this bit for
	 all JUMP_INSNs.  */
      if (!ia64_spec_check_src_p (src))
	flags.is_branch = 1;
      return rtx_needs_barrier (src, flags, pred);
    }

  if (ia64_spec_check_src_p (src))
    /* Avoid checking one register twice (in condition
       and in 'then' section) for ldc pattern.  */
    {
      gcc_assert (REG_P (XEXP (src, 2)));
      need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);

      /* We process MEM below.  */
      src = XEXP (src, 1);
    }

  need_barrier |= rtx_needs_barrier (src, flags, pred);

  dst = SET_DEST (x);
  if (GET_CODE (dst) == ZERO_EXTRACT)
    {
      need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
      need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
    }
  return need_barrier;
}
/* Handle an access to rtx X of type FLAGS using predicate register
   PRED.  Return 1 if this access creates a dependency with an earlier
   instruction in the same group.  */

static int
rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
{
  int i, j;
  int is_complemented = 0;
  int need_barrier = 0;
  const char *format_ptr;
  struct reg_flags new_flags;
  rtx cond;

  if (! x)
    return 0;

  new_flags = flags;

  switch (GET_CODE (x))
    {
    case SET:
      update_set_flags (x, &new_flags);
      need_barrier = set_src_needs_barrier (x, new_flags, pred);
      if (GET_CODE (SET_SRC (x)) != CALL)
	{
	  new_flags.is_write = 1;
	  need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
	}
      break;

    case CALL:
      new_flags.is_write = 0;
      need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);

      /* Avoid multiple register writes, in case this is a pattern with
	 multiple CALL rtx.  This avoids a failure in rws_access_reg.  */
      if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
	{
	  new_flags.is_write = 1;
	  need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
	  need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
	  need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
	}
      break;

    case COND_EXEC:
      /* X is a predicated instruction.  */

      cond = COND_EXEC_TEST (x);
      gcc_assert (!pred);
      need_barrier = rtx_needs_barrier (cond, flags, 0);

      if (GET_CODE (cond) == EQ)
	is_complemented = 1;
      cond = XEXP (cond, 0);
      gcc_assert (GET_CODE (cond) == REG
		  && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
      pred = REGNO (cond);
      if (is_complemented)
	++pred;

      need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
      return need_barrier;

    case CLOBBER:
    case USE:
      /* Clobber & use are for earlier compiler-phases only.  */
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
      /* We always emit stop bits for traditional asms.  We emit stop bits
	 for volatile extended asms if TARGET_VOL_ASM_STOP is true.  */
      if (GET_CODE (x) != ASM_OPERANDS
	  || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
	{
	  /* Avoid writing the register multiple times if we have multiple
	     asm outputs.  This avoids a failure in rws_access_reg.  */
	  if (! rws_insn_test (REG_VOLATILE))
	    {
	      new_flags.is_write = 1;
	      rws_access_regno (REG_VOLATILE, new_flags, pred);
	    }
	}

      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
	 We cannot just fall through here since then we would be confused
	 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
	 traditional asms unlike their normal usage.  */

      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
	if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
	  need_barrier = 1;
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
	{
	  rtx pat = XVECEXP (x, 0, i);
	  switch (GET_CODE (pat))
	    {
	    case SET:
	      update_set_flags (pat, &new_flags);
	      need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
	      break;

	    case USE:
	    case CALL:
	    case ASM_OPERANDS:
	    case ASM_INPUT:
	      need_barrier |= rtx_needs_barrier (pat, flags, pred);
	      break;

	    case CLOBBER:
	      if (REG_P (XEXP (pat, 0))
		  && extract_asm_operands (x) != NULL_RTX
		  && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
		{
		  new_flags.is_write = 1;
		  need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
						     new_flags, pred);
		  new_flags = flags;
		}
	      break;

	    case RETURN:
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
      for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
	{
	  rtx pat = XVECEXP (x, 0, i);
	  if (GET_CODE (pat) == SET)
	    {
	      if (GET_CODE (SET_SRC (pat)) != CALL)
		{
		  new_flags.is_write = 1;
		  need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
						     pred);
		}
	    }
	  else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
	    need_barrier |= rtx_needs_barrier (pat, flags, pred);
	}
      break;

    case SUBREG:
      need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
      break;
    case REG:
      if (REGNO (x) == AR_UNAT_REGNUM)
	{
	  for (i = 0; i < 64; ++i)
	    need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
	}
      else
	need_barrier = rws_access_reg (x, flags, pred);
      break;

    case MEM:
      /* Find the regs used in memory address computation.  */
      new_flags.is_write = 0;
      need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
      break;

    case CONST_INT:   case CONST_DOUBLE:  case CONST_VECTOR:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
      break;

      /* Operators with side-effects.  */
    case POST_INC:    case POST_DEC:
      gcc_assert (GET_CODE (XEXP (x, 0)) == REG);

      new_flags.is_write = 0;
      need_barrier  = rws_access_reg (XEXP (x, 0), new_flags, pred);
      new_flags.is_write = 1;
      need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
      break;

    case POST_MODIFY:
      gcc_assert (GET_CODE (XEXP (x, 0)) == REG);

      new_flags.is_write = 0;
      need_barrier  = rws_access_reg (XEXP (x, 0), new_flags, pred);
      need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
      new_flags.is_write = 1;
      need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
      need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
      break;

    case NEG:      case NOT:            case SIGN_EXTEND:     case ZERO_EXTEND:
    case TRUNCATE: case FLOAT_EXTEND:   case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:      case UNSIGNED_FLOAT: case UNSIGNED_FIX:    case ABS:
    case SQRT:     case FFS:            case POPCOUNT:
      need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
      break;

    case VEC_SELECT:
      /* VEC_SELECT's second argument is a PARALLEL with integers that
	 describe the elements selected.  On ia64, those integers are
	 always constants.  Avoid walking the PARALLEL so that we don't
	 get confused with "normal" parallels and then die.  */
      need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
      break;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_LTOFF_DTPMOD:
	case UNSPEC_LTOFF_DTPREL:
	case UNSPEC_DTPREL:
	case UNSPEC_LTOFF_TPREL:
	case UNSPEC_TPREL:
	case UNSPEC_PRED_REL_MUTEX:
	case UNSPEC_PIC_CALL:
	case UNSPEC_MF:
	case UNSPEC_FETCHADD_ACQ:
	case UNSPEC_FETCHADD_REL:
	case UNSPEC_BSP_VALUE:
	case UNSPEC_FLUSHRS:
	case UNSPEC_BUNDLE_SELECTOR:
	  break;

	case UNSPEC_GR_SPILL:
	case UNSPEC_GR_RESTORE:
	  {
	    HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
	    HOST_WIDE_INT bit = (offset >> 3) & 63;

	    need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
	    new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
	    need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
					      new_flags, pred);
	    break;
	  }

	case UNSPEC_FR_SPILL:
	case UNSPEC_FR_RESTORE:
	case UNSPEC_GETF_EXP:
	case UNSPEC_SETF_EXP:
	case UNSPEC_ADDP4:
	case UNSPEC_FR_SQRT_RECIP_APPROX:
	case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
	case UNSPEC_LDA:
	case UNSPEC_LDS:
	case UNSPEC_LDS_A:
	case UNSPEC_LDSA:
	case UNSPEC_CHKACLR:
	case UNSPEC_CHKS:
	  need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
	  break;

	case UNSPEC_FR_RECIP_APPROX:
	case UNSPEC_SHRP:
	case UNSPEC_COPYSIGN:
	case UNSPEC_FR_RECIP_APPROX_RES:
	  need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
	  need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
	  break;

	case UNSPEC_CMPXCHG_ACQ:
	case UNSPEC_CMPXCHG_REL:
	  need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
	  need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case UNSPEC_VOLATILE:
      switch (XINT (x, 1))
	{
	case UNSPECV_ALLOC:
	  /* Alloc must always be the first instruction of a group.
	     We force this by always returning true.  */
	  /* ??? We might get better scheduling if we explicitly check for
	     input/local/output register dependencies, and modify the
	     scheduler so that alloc is always reordered to the start of
	     the current group.  We could then eliminate all of the
	     first_instruction code.  */
	  rws_access_regno (AR_PFS_REGNUM, flags, pred);

	  new_flags.is_write = 1;
	  rws_access_regno (REG_AR_CFM, new_flags, pred);
	  return 1;

	case UNSPECV_SET_BSP:
	case UNSPECV_PROBE_STACK_RANGE:
	  need_barrier = 1;
	  break;

	case UNSPECV_BLOCKAGE:
	case UNSPECV_INSN_GROUP_BARRIER:
	case UNSPECV_BREAK:
	case UNSPECV_PSAC_ALL:
	case UNSPECV_PSAC_NORMAL:
	  return 0;

	case UNSPECV_PROBE_STACK_ADDRESS:
	  need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case RETURN:
      new_flags.is_write = 0;
      need_barrier  = rws_access_regno (REG_RP, flags, pred);
      need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);

      new_flags.is_write = 1;
      need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
      need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case '0':	/* unused field */
	  case 'i':	/* integer */
	  case 'n':	/* note */
	  case 'w':	/* wide integer */
	  case 's':	/* pointer to string */
	  case 'S':	/* optional pointer to string */
	    break;

	  case 'e':
	    if (rtx_needs_barrier (XEXP (x, i), flags, pred))
	      need_barrier = 1;
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; --j)
	      if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
		need_barrier = 1;
	    break;

	  default:
	    gcc_unreachable ();
	  }
      break;
    }
  return need_barrier;
}
/* Clear out the state for group_barrier_needed at the start of a
   sequence of insns.  */

static void
init_insn_group_barriers (void)
{
  memset (rws_sum, 0, sizeof (rws_sum));
  first_instruction = 1;
}
/* Given the current state, determine whether a group barrier (a stop bit) is
   necessary before INSN.  Return nonzero if so.  This modifies the state to
   include the effects of INSN as a side-effect.  */

static int
group_barrier_needed (rtx_insn *insn)
{
  rtx pat;
  int need_barrier = 0;
  struct reg_flags flags;

  memset (&flags, 0, sizeof (flags));
  switch (GET_CODE (insn))
    {
    case NOTE:
    case DEBUG_INSN:
      break;

    case BARRIER:
      /* A barrier doesn't imply an instruction group boundary.  */
      break;

    case CODE_LABEL:
      memset (rws_insn, 0, sizeof (rws_insn));
      return 1;

    case CALL_INSN:
      flags.is_branch = 1;
      flags.is_sibcall = SIBLING_CALL_P (insn);
      memset (rws_insn, 0, sizeof (rws_insn));

      /* Don't bundle a call following another call.  */
      if ((pat = prev_active_insn (insn)) && CALL_P (pat))
	{
	  need_barrier = 1;
	  break;
	}

      need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
      break;

    case JUMP_INSN:
      if (!ia64_spec_check_p (insn))
	flags.is_branch = 1;

      /* Don't bundle a jump following a call.  */
      if ((pat = prev_active_insn (insn)) && CALL_P (pat))
	{
	  need_barrier = 1;
	  break;
	}
      /* FALLTHRU */

    case INSN:
      if (GET_CODE (PATTERN (insn)) == USE
	  || GET_CODE (PATTERN (insn)) == CLOBBER)
	/* Don't care about USE and CLOBBER "insns"---those are used to
	   indicate to the optimizer that it shouldn't get rid of
	   certain operations.  */
	break;

      pat = PATTERN (insn);

      /* Ug.  Hack hacks hacked elsewhere.  */
      switch (recog_memoized (insn))
	{
	  /* We play dependency tricks with the epilogue in order
	     to get proper schedules.  Undo this for dv analysis.  */
	case CODE_FOR_epilogue_deallocate_stack:
	case CODE_FOR_prologue_allocate_stack:
	  pat = XVECEXP (pat, 0, 0);
	  break;

	  /* The pattern we use for br.cloop confuses the code above.
	     The second element of the vector is representative.  */
	case CODE_FOR_doloop_end_internal:
	  pat = XVECEXP (pat, 0, 1);
	  break;

	  /* Doesn't generate code.  */
	case CODE_FOR_pred_rel_mutex:
	case CODE_FOR_prologue_use:
	  return 0;

	default:
	  break;
	}

      memset (rws_insn, 0, sizeof (rws_insn));
      need_barrier = rtx_needs_barrier (pat, flags, 0);

      /* Check to see if the previous instruction was a volatile
	 asm.  */
      if (! need_barrier)
	need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (first_instruction && important_for_bundling_p (insn))
    {
      need_barrier = 0;
      first_instruction = 0;
    }

  return need_barrier;
}
/* Like group_barrier_needed, but do not clobber the current state.  */

static int
safe_group_barrier_needed (rtx_insn *insn)
{
  int saved_first_instruction;
  int t;

  saved_first_instruction = first_instruction;
  in_safe_group_barrier = 1;

  t = group_barrier_needed (insn);

  first_instruction = saved_first_instruction;
  in_safe_group_barrier = 0;

  return t;
}
/* Scan the current function and insert stop bits as necessary to
   eliminate dependencies.  This function assumes that a final
   instruction scheduling pass has been run which has already
   inserted most of the necessary stop bits.  This function only
   inserts new ones at basic block boundaries, since these are
   invisible to the scheduler.  */

static void
emit_insn_group_barriers (FILE *dump)
{
  rtx_insn *insn;
  rtx_insn *last_label = 0;
  int insns_since_last_label = 0;

  init_insn_group_barriers ();

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (LABEL_P (insn))
	{
	  if (insns_since_last_label)
	    last_label = insn;
	  insns_since_last_label = 0;
	}
      else if (NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
	{
	  if (insns_since_last_label)
	    last_label = insn;
	  insns_since_last_label = 0;
	}
      else if (NONJUMP_INSN_P (insn)
	       && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
	       && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
	{
	  init_insn_group_barriers ();
	  last_label = 0;
	}
      else if (NONDEBUG_INSN_P (insn))
	{
	  insns_since_last_label = 1;

	  if (group_barrier_needed (insn))
	    {
	      gcc_assert (last_label);
	      if (dump)
		fprintf (dump, "Emitting stop before label %d\n",
			 INSN_UID (last_label));
	      emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
	      insn = last_label;

	      init_insn_group_barriers ();
	      last_label = 0;
	    }
	}
    }
}
/* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
   This function has to emit all necessary group barriers.  */

static void
emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
{
  rtx_insn *insn;

  init_insn_group_barriers ();

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	{
	  rtx_insn *last = prev_active_insn (insn);

	  if (! last)
	    continue;
	  if (JUMP_TABLE_DATA_P (last))
	    last = prev_active_insn (last);
	  if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
	    emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);

	  init_insn_group_barriers ();
	}
      else if (NONDEBUG_INSN_P (insn))
	{
	  if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
	    init_insn_group_barriers ();
	  else if (group_barrier_needed (insn))
	    {
	      emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
	      init_insn_group_barriers ();
	      group_barrier_needed (insn);
	    }
	}
    }
}
/* Instruction scheduling support.  */

#define NR_BUNDLES 10

/* A list of names of all available bundles.  */

static const char *bundle_name [NR_BUNDLES] =
{
  ".mii",
  ".mmi",
  ".mfi",
  ".mmf",
#if NR_BUNDLES == 10
  ".bbb",
  ".mbb",
#endif
  ".mib",
  ".mmb",
  ".mfb",
  ".mlx"
};
/* Nonzero if we should insert stop bits into the schedule.  */

int ia64_final_schedule = 0;

/* Codes of the corresponding queried units: */

static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;

static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;

static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;

/* The following variable value is an insn group barrier.  */

static rtx_insn *dfa_stop_insn;

/* The following variable value is the last issued insn.  */

static rtx_insn *last_scheduled_insn;

/* The following variable value is pointer to a DFA state used as
   temporary variable.  */

static state_t temp_dfa_state = NULL;

/* The following variable value is DFA state after issuing the last
   insn.  */

static state_t prev_cycle_state = NULL;

/* The following array element values are TRUE if the corresponding
   insn requires to add stop bits before it.  */

static char *stops_p = NULL;

/* The following variable is used to set up the mentioned above array.  */

static int stop_before_p = 0;

/* The following variable value is length of the arrays `clocks' and
   `add_cycles'.  */

static int clocks_length;

/* The following variable value is number of data speculations in progress.  */
static int pending_data_specs = 0;

/* Number of memory references on current and three future processor cycles.  */
static char mem_ops_in_group[4];

/* Number of current processor cycle (from scheduler's point of view).  */
static int current_cycle;

static rtx ia64_single_set (rtx_insn *);
static void ia64_emit_insn_before (rtx, rtx_insn *);
/* Map a bundle number to its pseudo-op.  */

const char *
get_bundle_name (int b)
{
  return bundle_name[b];
}


/* Return the maximum number of instructions a cpu can issue.  */

static int
ia64_issue_rate (void)
{
  return 6;
}
/* Helper function - like single_set, but look inside COND_EXEC.  */

static rtx
ia64_single_set (rtx_insn *insn)
{
  rtx x = PATTERN (insn), ret;
  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);
  if (GET_CODE (x) == SET)
    return x;

  /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
     Although they are not classical single set, the second set is there just
     to protect it from moving past FP-relative stack accesses.  */
  switch (recog_memoized (insn))
    {
    case CODE_FOR_prologue_allocate_stack:
    case CODE_FOR_prologue_allocate_stack_pr:
    case CODE_FOR_epilogue_deallocate_stack:
    case CODE_FOR_epilogue_deallocate_stack_pr:
      ret = XVECEXP (x, 0, 0);
      break;

    default:
      ret = single_set_2 (insn, x);
      break;
    }

  return ret;
}
/* Adjust the cost of a scheduling dependency.
   Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
   COST is the current cost, DW is dependency weakness.  */
static int
ia64_adjust_cost (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
		  int cost, dw_t dw)
{
  enum reg_note dep_type = (enum reg_note) dep_type1;
  enum attr_itanium_class dep_class;
  enum attr_itanium_class insn_class;

  insn_class = ia64_safe_itanium_class (insn);
  dep_class = ia64_safe_itanium_class (dep_insn);

  /* Treat true memory dependencies separately.  Ignore apparent true
     dependence between store and call (call has a MEM inside a SYMBOL_REF).  */
  if (dep_type == REG_DEP_TRUE
      && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
      && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
    return 0;

  if (dep_type == REG_DEP_TRUE
      && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
      && (insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD
	  || insn_class == ITANIUM_CLASS_FLDP))
    {
      if (dw == MIN_DEP_WEAK)
	/* Store and load are likely to alias, use higher cost to avoid stall.  */
	return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
      else if (dw > MIN_DEP_WEAK)
	{
	  /* Store and load are less likely to alias.  */
	  if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
	    /* Assume there will be no cache conflict for floating-point data.
	       For integer data, L1 conflict penalty is huge (17 cycles), so we
	       never assume it will not cause a conflict.  */
	    return 0;
	  else
	    return cost;
	}
    }

  if (dep_type != REG_DEP_OUTPUT)
    return cost;

  if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
      || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
    return 0;

  return cost;
}
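
/* Illustrative summary (not part of the original sources): for a true
   store -> load dependence, dw == MIN_DEP_WEAK (the accesses are likely to
   alias) raises the cost to PARAM_SCHED_MEM_TRUE_DEP_COST to avoid a
   stall; a weaker dependence keeps the incoming cost, or drops it to 0
   for floating-point stores when -msched-fp-mem-deps-zero-cost is given.
   Output dependencies involving a store cost 0; everything else passes
   through unchanged.  */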
/* Like emit_insn_before, but skip cycle_display notes.
   ??? When cycle display notes are implemented, update this.  */

static void
ia64_emit_insn_before (rtx insn, rtx_insn *before)
{
  emit_insn_before (insn, before);
}
/* The following function marks insns that produce addresses for load
   and store insns.  Such insns will be placed into M slots because it
   decreases latency time for Itanium1 (see function
   `ia64_produce_address_p' and the DFA descriptions).  */

static void
ia64_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *next, *next_tail;

  /* Before reload, which_alternative is not set, which means that
     ia64_safe_itanium_class will produce wrong results for (at least)
     move instructions.  */
  if (!reload_completed)
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      insn->call = 0;
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
      {
	sd_iterator_def sd_it;
	dep_t dep;
	bool has_mem_op_consumer_p = false;

	FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
	  {
	    enum attr_itanium_class c;

	    if (DEP_TYPE (dep) != REG_DEP_TRUE)
	      continue;

	    next = DEP_CON (dep);
	    c = ia64_safe_itanium_class (next);
	    if ((c == ITANIUM_CLASS_ST
		 || c == ITANIUM_CLASS_STF)
		&& ia64_st_address_bypass_p (insn, next))
	      {
		has_mem_op_consumer_p = true;
		break;
	      }
	    else if ((c == ITANIUM_CLASS_LD
		      || c == ITANIUM_CLASS_FLD
		      || c == ITANIUM_CLASS_FLDP)
		     && ia64_ld_address_bypass_p (insn, next))
	      {
		has_mem_op_consumer_p = true;
		break;
	      }
	  }

	insn->call = has_mem_op_consumer_p;
      }
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		 int sched_verbose ATTRIBUTE_UNUSED,
		 int max_ready ATTRIBUTE_UNUSED)
{
  if (flag_checking && !sel_sched_p () && reload_completed)
    {
      for (rtx_insn *insn = NEXT_INSN (current_sched_info->prev_head);
	   insn != current_sched_info->next_tail;
	   insn = NEXT_INSN (insn))
	gcc_assert (!SCHED_GROUP_P (insn));
    }
  last_scheduled_insn = NULL;
  init_insn_group_barriers ();

  current_cycle = 0;
  memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
}
/* We're beginning a scheduling pass.  Check assertion.  */

static void
ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
			int sched_verbose ATTRIBUTE_UNUSED,
			int max_ready ATTRIBUTE_UNUSED)
{
  gcc_assert (pending_data_specs == 0);
}

/* Scheduling pass is now finished.  Free/reset static variable.  */
static void
ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
			  int sched_verbose ATTRIBUTE_UNUSED)
{
  gcc_assert (pending_data_specs == 0);
}
/* Return TRUE if INSN is a load (either normal or speculative, but not a
   speculation check), FALSE otherwise.  */
static bool
is_load_p (rtx_insn *insn)
{
  enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);

  return
    ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
     && get_attr_check_load (insn) == CHECK_LOAD_NO);
}
/* If INSN is a memory reference, memoize it in MEM_OPS_IN_GROUP global array
   (taking account for 3-cycle cache reference postponing for stores: Intel
   Itanium 2 Reference Manual for Software Development and Optimization,
   6.7.3.1).  */
static void
record_memory_reference (rtx_insn *insn)
{
  enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);

  switch (insn_class) {
    case ITANIUM_CLASS_FLD:
    case ITANIUM_CLASS_LD:
      mem_ops_in_group[current_cycle % 4]++;
      break;
    case ITANIUM_CLASS_STF:
    case ITANIUM_CLASS_ST:
      mem_ops_in_group[(current_cycle + 3) % 4]++;
      break;
    default:;
  }
}
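
/* Worked example (an illustrative addition, not part of the original
   sources): with current_cycle == 5, a load is charged to
   mem_ops_in_group[5 % 4], i.e. slot 1, the current cycle, while a store
   is charged to mem_ops_in_group[(5 + 3) % 4], i.e. slot 0, three cycles
   later, modeling the postponed cache reference.  The four slots thus
   act as a ring buffer over the current and the next three cycles.  */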
/* We are about to begin issuing insns for this clock cycle.
   Override the default sort algorithm to better slot instructions.  */

static int
ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
			int *pn_ready, int clock_var,
			int reorder_type)
{
  int n_asms;
  int n_ready = *pn_ready;
  rtx_insn **e_ready = ready + n_ready;
  rtx_insn **insnp;

  if (sched_verbose)
    fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);

  if (reorder_type == 0)
    {
      /* First, move all USEs, CLOBBERs and other crud out of the way.  */
      n_asms = 0;
      for (insnp = ready; insnp < e_ready; insnp++)
	if (insnp < e_ready)
	  {
	    rtx_insn *insn = *insnp;
	    enum attr_type t = ia64_safe_type (insn);
	    if (t == TYPE_UNKNOWN)
	      {
		if (GET_CODE (PATTERN (insn)) == ASM_INPUT
		    || asm_noperands (PATTERN (insn)) >= 0)
		  {
		    rtx_insn *lowest = ready[n_asms];
		    ready[n_asms] = insn;
		    *insnp = lowest;
		    n_asms++;
		  }
		else
		  {
		    rtx_insn *highest = ready[n_ready - 1];
		    ready[n_ready - 1] = insn;
		    *insnp = highest;
		    return 1;
		  }
	      }
	  }

      if (n_asms < n_ready)
	{
	  /* Some normal insns to process.  Skip the asms.  */
	  ready += n_asms;
	  n_ready -= n_asms;
	}
      else if (n_ready > 0)
	return 1;
    }

  if (ia64_final_schedule)
    {
      int deleted = 0;
      int nr_need_stop = 0;

      for (insnp = ready; insnp < e_ready; insnp++)
	if (safe_group_barrier_needed (*insnp))
	  nr_need_stop++;

      if (reorder_type == 1 && n_ready == nr_need_stop)
	return 0;
      if (reorder_type == 0)
	return 1;
      insnp = e_ready;
      /* Move down everything that needs a stop bit, preserving
	 relative order.  */
      while (insnp-- > ready + deleted)
	while (insnp >= ready + deleted)
	  {
	    rtx_insn *insn = *insnp;
	    if (! safe_group_barrier_needed (insn))
	      break;
	    memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
	    *ready = insn;
	    deleted++;
	  }
      n_ready -= deleted;
      ready += deleted;
    }

  current_cycle = clock_var;
  if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
    {
      int moved = 0;

      insnp = e_ready;
      /* Move down loads/stores, preserving relative order.  */
      while (insnp-- > ready + moved)
	while (insnp >= ready + moved)
	  {
	    rtx_insn *insn = *insnp;
	    if (! is_load_p (insn))
	      break;
	    memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
	    *ready = insn;
	    moved++;
	  }
      n_ready -= moved;
      ready += moved;
    }

  return 1;
}
/* We are about to begin issuing insns for this clock cycle.  Override
   the default sort algorithm to better slot instructions.  */

static int
ia64_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
		    int *pn_ready, int clock_var)
{
  return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
				 pn_ready, clock_var, 0);
}

/* Like ia64_sched_reorder, but called after issuing each insn.
   Override the default sort algorithm to better slot instructions.  */

static int
ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
		     int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
		     int *pn_ready, int clock_var)
{
  return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
				 clock_var, 1);
}
/* We are about to issue INSN.  Return the number of insns left on the
   ready queue that can be issued this cycle.  */

static int
ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
		     int sched_verbose ATTRIBUTE_UNUSED,
		     rtx_insn *insn,
		     int can_issue_more ATTRIBUTE_UNUSED)
{
  if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
    /* Modulo scheduling does not extend h_i_d when emitting
       new instructions.  Don't use h_i_d, if we don't have to.  */
    {
      if (DONE_SPEC (insn) & BEGIN_DATA)
	pending_data_specs++;
      if (CHECK_SPEC (insn) & BEGIN_DATA)
	pending_data_specs--;
    }

  if (DEBUG_INSN_P (insn))
    return 1;

  last_scheduled_insn = insn;
  memcpy (prev_cycle_state, curr_state, dfa_state_size);
  if (reload_completed)
    {
      int needed = group_barrier_needed (insn);

      gcc_assert (!needed);
      if (CALL_P (insn))
	init_insn_group_barriers ();
      stops_p [INSN_UID (insn)] = stop_before_p;
      stop_before_p = 0;

      record_memory_reference (insn);
    }
  return 1;
}
/* We are choosing insn from the ready queue.  Return zero if INSN
   can be chosen.  */

static int
ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
{
  gcc_assert (insn && INSN_P (insn));

  /* Size of ALAT is 32.  As far as we perform conservative
     data speculation, we keep ALAT half-empty.  */
  if (pending_data_specs >= 16 && (TODO_SPEC (insn) & BEGIN_DATA))
    return ready_index == 0 ? -1 : 1;

  if (ready_index == 0)
    return 0;

  if ((!reload_completed
       || !safe_group_barrier_needed (insn))
      && (!mflag_sched_mem_insns_hard_limit
	  || !is_load_p (insn)
	  || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns))
    return 0;

  return 1;
}
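
/* Illustrative note (not part of the original sources): the ALAT has 32
   entries, and the guard above treats 16 outstanding data speculations as
   the "half-empty" limit -- once pending_data_specs reaches 16, further
   BEGIN_DATA candidates are rejected.  The two rejection values
   (ready_index == 0 ? -1 : 1) follow the hook's convention for whether
   further lookahead across the ready list is still worthwhile.  */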
/* The following variable value is pseudo-insn used by the DFA insn
   scheduler to change the DFA state when the simulated clock is
   increased.  */

static rtx_insn *dfa_pre_cycle_insn;

/* Returns 1 when a meaningful insn was scheduled between the last group
   barrier and LAST.  */
static int
scheduled_good_insn (rtx_insn *last)
{
  if (last && recog_memoized (last) >= 0)
    return 1;

  for ( ;
       last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
	 && !stops_p[INSN_UID (last)];
       last = PREV_INSN (last))
    /* We could hit a NOTE_INSN_DELETED here which is actually outside
       the ebb we're scheduling.  */
    if (INSN_P (last) && recog_memoized (last) >= 0)
      return 1;

  return 0;
}
/* We are about to begin issuing INSN.  Return nonzero if we cannot
   issue it on given cycle CLOCK and return zero if we should not sort
   the ready queue on the next clock start.  */

static int
ia64_dfa_new_cycle (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
		    int clock, int *sort_p)
{
  gcc_assert (insn && INSN_P (insn));

  if (DEBUG_INSN_P (insn))
    return 0;

  /* When a group barrier is needed for insn, last_scheduled_insn
     should be set.  */
  gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
	      || last_scheduled_insn);

  if ((reload_completed
       && (safe_group_barrier_needed (insn)
	   || (mflag_sched_stop_bits_after_every_cycle
	       && last_clock != clock
	       && last_scheduled_insn
	       && scheduled_good_insn (last_scheduled_insn))))
      || (last_scheduled_insn
	  && (CALL_P (last_scheduled_insn)
	      || unknown_for_bundling_p (last_scheduled_insn))))
    {
      init_insn_group_barriers ();

      if (verbose && dump)
	fprintf (dump, "//    Stop should be before %d%s\n", INSN_UID (insn),
		 last_clock == clock ? " + cycle advance" : "");

      stop_before_p = 1;
      current_cycle = clock;
      mem_ops_in_group[current_cycle % 4] = 0;

      if (last_clock == clock)
	{
	  state_transition (curr_state, dfa_stop_insn);
	  if (TARGET_EARLY_STOP_BITS)
	    *sort_p = (last_scheduled_insn == NULL_RTX
		       || ! CALL_P (last_scheduled_insn));
	  else
	    *sort_p = 0;
	  return 1;
	}

      if (last_scheduled_insn)
	{
	  if (unknown_for_bundling_p (last_scheduled_insn))
	    state_reset (curr_state);
	  else
	    {
	      memcpy (curr_state, prev_cycle_state, dfa_state_size);
	      state_transition (curr_state, dfa_stop_insn);
	      state_transition (curr_state, dfa_pre_cycle_insn);
	      state_transition (curr_state, NULL);
	    }
	}
    }
  return 0;
}
/* Implement targetm.sched.h_i_d_extended hook.
   Extend internal data structures.  */
static void
ia64_h_i_d_extended (void)
{
  if (stops_p != NULL)
    {
      int new_clocks_length = get_max_uid () * 3 / 2;
      stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
      clocks_length = new_clocks_length;
    }
}
/* This structure describes the data used by the backend to guide scheduling.
   When the current scheduling point is switched, this data should be saved
   and restored later, if the scheduler returns to this point.  */
struct _ia64_sched_context
{
  state_t prev_cycle_state;
  rtx_insn *last_scheduled_insn;
  struct reg_write_state rws_sum[NUM_REGS];
  struct reg_write_state rws_insn[NUM_REGS];
  int first_instruction;
  int pending_data_specs;
  int current_cycle;
  char mem_ops_in_group[4];
};
typedef struct _ia64_sched_context *ia64_sched_context_t;
/* Allocates a scheduling context.  */
static void *
ia64_alloc_sched_context (void)
{
  return xmalloc (sizeof (struct _ia64_sched_context));
}
/* Initializes the _SC context with clean data, if CLEAN_P, and from
   the global context otherwise.  */
static void
ia64_init_sched_context (void *_sc, bool clean_p)
{
  ia64_sched_context_t sc = (ia64_sched_context_t) _sc;

  sc->prev_cycle_state = xmalloc (dfa_state_size);
  if (clean_p)
    {
      state_reset (sc->prev_cycle_state);
      sc->last_scheduled_insn = NULL;
      memset (sc->rws_sum, 0, sizeof (rws_sum));
      memset (sc->rws_insn, 0, sizeof (rws_insn));
      sc->first_instruction = 1;
      sc->pending_data_specs = 0;
      sc->current_cycle = 0;
      memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
    }
  else
    {
      memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
      sc->last_scheduled_insn = last_scheduled_insn;
      memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
      memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
      sc->first_instruction = first_instruction;
      sc->pending_data_specs = pending_data_specs;
      sc->current_cycle = current_cycle;
      memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
    }
}
/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
ia64_set_sched_context (void *_sc)
{
  ia64_sched_context_t sc = (ia64_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
  last_scheduled_insn = sc->last_scheduled_insn;
  memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
  memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
  first_instruction = sc->first_instruction;
  pending_data_specs = sc->pending_data_specs;
  current_cycle = sc->current_cycle;
  memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
}

/* Clears the data in the _SC scheduling context.  */
static void
ia64_clear_sched_context (void *_sc)
{
  ia64_sched_context_t sc = (ia64_sched_context_t) _sc;

  free (sc->prev_cycle_state);
  sc->prev_cycle_state = NULL;
}

/* Frees the _SC scheduling context.  */
static void
ia64_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  ia64_clear_sched_context (_sc);
  free (_sc);
}
typedef rtx (* gen_func_t) (rtx, rtx);

/* Return a function that will generate a load of mode MODE_NO
   with speculation types TS.  */
static gen_func_t
get_spec_load_gen_function (ds_t ts, int mode_no)
{
  static gen_func_t gen_ld_[] = {
    gen_movbi,
    gen_movqi_internal,
    gen_movhi_internal,
    gen_movsi_internal,
    gen_movdi_internal,
    gen_movsf_internal,
    gen_movdf_internal,
    gen_movxf_internal,
    gen_movti_internal,
    gen_zero_extendqidi2,
    gen_zero_extendhidi2,
    gen_zero_extendsidi2,
  };

  static gen_func_t gen_ld_a[] = {
    gen_movbi_advanced,
    gen_movqi_advanced,
    gen_movhi_advanced,
    gen_movsi_advanced,
    gen_movdi_advanced,
    gen_movsf_advanced,
    gen_movdf_advanced,
    gen_movxf_advanced,
    gen_movti_advanced,
    gen_zero_extendqidi2_advanced,
    gen_zero_extendhidi2_advanced,
    gen_zero_extendsidi2_advanced,
  };
  static gen_func_t gen_ld_s[] = {
    gen_movbi_speculative,
    gen_movqi_speculative,
    gen_movhi_speculative,
    gen_movsi_speculative,
    gen_movdi_speculative,
    gen_movsf_speculative,
    gen_movdf_speculative,
    gen_movxf_speculative,
    gen_movti_speculative,
    gen_zero_extendqidi2_speculative,
    gen_zero_extendhidi2_speculative,
    gen_zero_extendsidi2_speculative,
  };
  static gen_func_t gen_ld_sa[] = {
    gen_movbi_speculative_advanced,
    gen_movqi_speculative_advanced,
    gen_movhi_speculative_advanced,
    gen_movsi_speculative_advanced,
    gen_movdi_speculative_advanced,
    gen_movsf_speculative_advanced,
    gen_movdf_speculative_advanced,
    gen_movxf_speculative_advanced,
    gen_movti_speculative_advanced,
    gen_zero_extendqidi2_speculative_advanced,
    gen_zero_extendhidi2_speculative_advanced,
    gen_zero_extendsidi2_speculative_advanced,
  };
  static gen_func_t gen_ld_s_a[] = {
    gen_movbi_speculative_a,
    gen_movqi_speculative_a,
    gen_movhi_speculative_a,
    gen_movsi_speculative_a,
    gen_movdi_speculative_a,
    gen_movsf_speculative_a,
    gen_movdf_speculative_a,
    gen_movxf_speculative_a,
    gen_movti_speculative_a,
    gen_zero_extendqidi2_speculative_a,
    gen_zero_extendhidi2_speculative_a,
    gen_zero_extendsidi2_speculative_a,
  };

  gen_func_t *gen_ld;

  if (ts & BEGIN_DATA)
    {
      if (ts & BEGIN_CONTROL)
	gen_ld = gen_ld_sa;
      else
	gen_ld = gen_ld_a;
    }
  else if (ts & BEGIN_CONTROL)
    {
      if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
	  || ia64_needs_block_p (ts))
	gen_ld = gen_ld_s;
      else
	gen_ld = gen_ld_s_a;
    }
  else if (ts == 0)
    gen_ld = gen_ld_;
  else
    gcc_unreachable ();

  return gen_ld[mode_no];
}
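
/* Illustrative note (not part of the original sources): the table is
   chosen by the speculation type and check policy -- a ts with both
   BEGIN_DATA and BEGIN_CONTROL picks gen_ld_sa (ld.sa), BEGIN_DATA alone
   picks gen_ld_a (ld.a), and BEGIN_CONTROL alone picks gen_ld_s or
   gen_ld_s_a depending on whether the control-speculation check is
   skipped or branchy.  mode_no then selects the row; e.g. gen_ld_sa[4]
   yields gen_movdi_speculative_advanced for a DImode load.  */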
/* Constants that help mapping 'machine_mode' to int.  */
enum SPEC_MODES
  {
    SPEC_MODE_INVALID = -1,
    SPEC_MODE_FIRST = 0,
    SPEC_MODE_FOR_EXTEND_FIRST = 1,
    SPEC_MODE_FOR_EXTEND_LAST = 3,
    SPEC_MODE_LAST = 8
  };

enum
  {
    /* Offset to reach ZERO_EXTEND patterns.  */
    SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
  };

/* Return index of the MODE.  */
static int
ia64_mode_to_int (machine_mode mode)
{
  switch (mode)
    {
    case E_BImode: return 0; /* SPEC_MODE_FIRST  */
    case E_QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST  */
    case E_HImode: return 2;
    case E_SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST  */
    case E_DImode: return 4;
    case E_SFmode: return 5;
    case E_DFmode: return 6;
    case E_XFmode: return 7;
    case E_TImode:
      /* ??? This mode needs testing.  Bypasses for ldfp8 instruction are not
	 mentioned in itanium[12].md.  Predicate fp_register_operand also
	 needs to be defined.  Bottom line: better disable for now.  */
      return SPEC_MODE_INVALID;
    default:     return SPEC_MODE_INVALID;
    }
}
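
/* Worked example (an illustrative addition, not part of the original
   sources): SPEC_GEN_EXTEND_OFFSET is SPEC_MODE_LAST
   - SPEC_MODE_FOR_EXTEND_FIRST + 1 == 8 - 1 + 1 == 8, so a QImode load
   whose result is zero-extended to DImode maps to index 1 + 8 == 9 --
   exactly the gen_zero_extendqidi2* slot in the twelve-entry generator
   tables above (indices 0-8 are plain moves, 9-11 the ZERO_EXTEND
   variants).  */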
/* Provide information about speculation capabilities.  */
static void
ia64_set_sched_flags (spec_info_t spec_info)
{
  unsigned int *flags = &(current_sched_info->flags);

  if (*flags & SCHED_RGN
      || *flags & SCHED_EBB
      || *flags & SEL_SCHED)
    {
      int mask = 0;

      if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
	  || (mflag_sched_ar_data_spec && reload_completed))
	{
	  mask |= BEGIN_DATA;

	  if (!sel_sched_p ()
	      && ((mflag_sched_br_in_data_spec && !reload_completed)
		  || (mflag_sched_ar_in_data_spec && reload_completed)))
	    mask |= BE_IN_DATA;
	}

      if (mflag_sched_control_spec
	  && (!sel_sched_p ()
	      || reload_completed))
	{
	  mask |= BEGIN_CONTROL;

	  if (!sel_sched_p () && mflag_sched_in_control_spec)
	    mask |= BE_IN_CONTROL;
	}

      spec_info->mask = mask;

      if (mask)
	{
	  *flags |= USE_DEPS_LIST | DO_SPECULATION;

	  if (mask & BE_IN_SPEC)
	    *flags |= NEW_BBS;

	  spec_info->flags = 0;

	  if ((mask & CONTROL_SPEC)
	      && sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
	    spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;

	  if (sched_verbose >= 1)
	    spec_info->dump = sched_dump;
	  else
	    spec_info->dump = 0;

	  if (mflag_sched_count_spec_in_critical_path)
	    spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
	}
    }
  else
    spec_info->mask = 0;
}
/* If INSN is an appropriate load return its mode.
   Return -1 otherwise.  */
static int
get_mode_no_for_insn (rtx_insn *insn)
{
  rtx reg, mem, mode_rtx;
  int mode_no;
  bool extend_p;

  extract_insn_cached (insn);

  /* We use WHICH_ALTERNATIVE only after reload.  This will
     guarantee that reload won't touch a speculative insn.  */

  if (recog_data.n_operands != 2)
    return -1;

  reg = recog_data.operand[0];
  mem = recog_data.operand[1];

  /* We should use MEM's mode since REG's mode in presence of
     ZERO_EXTEND will always be DImode.  */
  if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
    /* Process non-speculative ld.  */
    {
      if (!reload_completed)
	{
	  /* Do not speculate into regs like ar.lc.  */
	  if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
	    return -1;

	  if (!MEM_P (mem))
	    return -1;

	  {
	    rtx mem_reg = XEXP (mem, 0);

	    if (!REG_P (mem_reg))
	      return -1;
	  }

	  mode_rtx = mem;
	}
      else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
	{
	  gcc_assert (REG_P (reg) && MEM_P (mem));
	  mode_rtx = mem;
	}
      else
	return -1;
    }
  else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
	   || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
	   || get_attr_check_load (insn) == CHECK_LOAD_YES)
    /* Process speculative ld or ld.c.  */
    {
      gcc_assert (REG_P (reg) && MEM_P (mem));
      mode_rtx = mem;
    }
  else
    {
      enum attr_itanium_class attr_class = get_attr_itanium_class (insn);

      if (attr_class == ITANIUM_CLASS_CHK_A
	  || attr_class == ITANIUM_CLASS_CHK_S_I
	  || attr_class == ITANIUM_CLASS_CHK_S_F)
	/* Process chk.  */
	mode_rtx = reg;
      else
	return -1;
    }

  mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));

  if (mode_no == SPEC_MODE_INVALID)
    return -1;

  extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));

  if (extend_p)
    {
      if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
	    && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
	return -1;

      mode_no += SPEC_GEN_EXTEND_OFFSET;
    }

  return mode_no;
}
/* If X is an unspec part of a speculative load, return its code.
   Return -1 otherwise.  */
static int
get_spec_unspec_code (const_rtx x)
{
  if (GET_CODE (x) != UNSPEC)
    return -1;
  else
    {
      int code;

      code = XINT (x, 1);

      switch (code)
	{
	case UNSPEC_LDA:
	case UNSPEC_LDS:
	case UNSPEC_LDS_A:
	case UNSPEC_LDSA:
	  return code;

	default:
	  return -1;
	}
    }
}

/* Implement skip_rtx_p hook.  */
static bool
ia64_skip_rtx_p (const_rtx x)
{
  return get_spec_unspec_code (x) != -1;
}
/* If INSN is a speculative load, return its UNSPEC code.
   Return -1 otherwise.  */
static int
get_insn_spec_code (const_rtx insn)
{
  rtx pat, reg, mem;

  pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) != SET)
    return -1;

  reg = SET_DEST (pat);
  if (!REG_P (reg))
    return -1;

  mem = SET_SRC (pat);
  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  return get_spec_unspec_code (mem);
}
/* If INSN is a speculative load, return a ds with the speculation types.
   Otherwise [if INSN is a normal instruction] return 0.  */
static ds_t
ia64_get_insn_spec_ds (rtx_insn *insn)
{
  int code = get_insn_spec_code (insn);

  switch (code)
    {
    case UNSPEC_LDA:
      return BEGIN_DATA;

    case UNSPEC_LDS:
    case UNSPEC_LDS_A:
      return BEGIN_CONTROL;

    case UNSPEC_LDSA:
      return BEGIN_DATA | BEGIN_CONTROL;

    default:
      return 0;
    }
}

/* If INSN is a speculative load return a ds with the speculation types that
   will be checked.
   Otherwise [if INSN is a normal instruction] return 0.  */
static ds_t
ia64_get_insn_checked_ds (rtx_insn *insn)
{
  int code = get_insn_spec_code (insn);

  switch (code)
    {
    case UNSPEC_LDA:
      return BEGIN_DATA | BEGIN_CONTROL;

    case UNSPEC_LDS:
      return BEGIN_CONTROL;

    case UNSPEC_LDS_A:
    case UNSPEC_LDSA:
      return BEGIN_DATA | BEGIN_CONTROL;

    default:
      return 0;
    }
}
/* If GEN_P is true, calculate the index of needed speculation check and return
   speculative pattern for INSN with speculative mode TS, machine mode
   MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
   If GEN_P is false, just calculate the index of needed speculation check.  */
static rtx
ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
{
  rtx pat, new_pat;
  gen_func_t gen_load;

  gen_load = get_spec_load_gen_function (ts, mode_no);

  new_pat = gen_load (copy_rtx (recog_data.operand[0]),
		      copy_rtx (recog_data.operand[1]));

  pat = PATTERN (insn);
  if (GET_CODE (pat) == COND_EXEC)
    new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
				 new_pat);

  return new_pat;
}

static bool
insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
			      ds_t ds ATTRIBUTE_UNUSED)
{
  return false;
}
/* Implement targetm.sched.speculate_insn hook.
   Check if the INSN can be TS speculative.
   If 'no' - return -1.
   If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
   If current pattern of the INSN already provides TS speculation,
   return 0.  */
static int
ia64_speculate_insn (rtx_insn *insn, ds_t ts, rtx *new_pat)
{
  int mode_no;
  int res;

  gcc_assert (!(ts & ~SPECULATIVE));

  if (ia64_spec_check_p (insn))
    return -1;

  if ((ts & BE_IN_SPEC)
      && !insn_can_be_in_speculative_p (insn, ts))
    return -1;

  mode_no = get_mode_no_for_insn (insn);

  if (mode_no != SPEC_MODE_INVALID)
    {
      if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
	res = 0;
      else
	{
	  res = 1;
	  *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
	}
    }
  else
    res = -1;

  return res;
}
/* Return a function that will generate a check for speculation TS with mode
   MODE_NO.
   If simple check is needed, pass true for SIMPLE_CHECK_P.
   If clearing check is needed, pass true for CLEARING_CHECK_P.  */
static gen_func_t
get_spec_check_gen_function (ds_t ts, int mode_no,
			     bool simple_check_p, bool clearing_check_p)
{
  static gen_func_t gen_ld_c_clr[] = {
    gen_movbi_clr,
    gen_movqi_clr,
    gen_movhi_clr,
    gen_movsi_clr,
    gen_movdi_clr,
    gen_movsf_clr,
    gen_movdf_clr,
    gen_movxf_clr,
    gen_movti_clr,
    gen_zero_extendqidi2_clr,
    gen_zero_extendhidi2_clr,
    gen_zero_extendsidi2_clr,
  };
  static gen_func_t gen_ld_c_nc[] = {
    gen_movbi_nc,
    gen_movqi_nc,
    gen_movhi_nc,
    gen_movsi_nc,
    gen_movdi_nc,
    gen_movsf_nc,
    gen_movdf_nc,
    gen_movxf_nc,
    gen_movti_nc,
    gen_zero_extendqidi2_nc,
    gen_zero_extendhidi2_nc,
    gen_zero_extendsidi2_nc,
  };
  static gen_func_t gen_chk_a_clr[] = {
    gen_advanced_load_check_clr_bi,
    gen_advanced_load_check_clr_qi,
    gen_advanced_load_check_clr_hi,
    gen_advanced_load_check_clr_si,
    gen_advanced_load_check_clr_di,
    gen_advanced_load_check_clr_sf,
    gen_advanced_load_check_clr_df,
    gen_advanced_load_check_clr_xf,
    gen_advanced_load_check_clr_ti,
    gen_advanced_load_check_clr_di,
    gen_advanced_load_check_clr_di,
    gen_advanced_load_check_clr_di,
  };
  static gen_func_t gen_chk_a_nc[] = {
    gen_advanced_load_check_nc_bi,
    gen_advanced_load_check_nc_qi,
    gen_advanced_load_check_nc_hi,
    gen_advanced_load_check_nc_si,
    gen_advanced_load_check_nc_di,
    gen_advanced_load_check_nc_sf,
    gen_advanced_load_check_nc_df,
    gen_advanced_load_check_nc_xf,
    gen_advanced_load_check_nc_ti,
    gen_advanced_load_check_nc_di,
    gen_advanced_load_check_nc_di,
    gen_advanced_load_check_nc_di,
  };
  static gen_func_t gen_chk_s[] = {
    gen_speculation_check_bi,
    gen_speculation_check_qi,
    gen_speculation_check_hi,
    gen_speculation_check_si,
    gen_speculation_check_di,
    gen_speculation_check_sf,
    gen_speculation_check_df,
    gen_speculation_check_xf,
    gen_speculation_check_ti,
    gen_speculation_check_di,
    gen_speculation_check_di,
    gen_speculation_check_di,
  };

  gen_func_t *gen_check;

  if (ts & BEGIN_DATA)
    {
      /* We don't need recovery because even if this is ld.sa
	 ALAT entry will be allocated only if NAT bit is set to zero.
	 So it is enough to use ld.c here.  */

      if (simple_check_p)
	{
	  gcc_assert (mflag_sched_spec_ldc);

	  if (clearing_check_p)
	    gen_check = gen_ld_c_clr;
	  else
	    gen_check = gen_ld_c_nc;
	}
      else
	{
	  if (clearing_check_p)
	    gen_check = gen_chk_a_clr;
	  else
	    gen_check = gen_chk_a_nc;
	}
    }
  else if (ts & BEGIN_CONTROL)
    {
      if (simple_check_p)
	/* We might want to use ld.sa -> ld.c instead of
	   ld.s -> chk.s.  */
	{
	  gcc_assert (!ia64_needs_block_p (ts));

	  if (clearing_check_p)
	    gen_check = gen_ld_c_clr;
	  else
	    gen_check = gen_ld_c_nc;
	}
      else
	gen_check = gen_chk_s;
    }
  else
    gcc_unreachable ();

  gcc_assert (mode_no >= 0);
  return gen_check[mode_no];
}
/* Return nonzero, if INSN needs branchy recovery check.  */
static bool
ia64_needs_block_p (ds_t ts)
{
  if (ts & BEGIN_DATA)
    return !mflag_sched_spec_ldc;

  gcc_assert ((ts & BEGIN_CONTROL) != 0);

  return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
}
/* Generate (or regenerate) a recovery check for INSN.  */
static rtx
ia64_gen_spec_check (rtx_insn *insn, rtx_insn *label, ds_t ds)
{
  rtx op1, pat, check_pat;
  gen_func_t gen_check;
  int mode_no;

  mode_no = get_mode_no_for_insn (insn);
  gcc_assert (mode_no >= 0);

  if (label)
    op1 = label;
  else
    {
      gcc_assert (!ia64_needs_block_p (ds));
      op1 = copy_rtx (recog_data.operand[1]);
    }

  gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
					   true);

  check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);

  pat = PATTERN (insn);
  if (GET_CODE (pat) == COND_EXEC)
    check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
				   check_pat);

  return check_pat;
}
8512 ia64_spec_check_p (rtx x
)
8515 if (GET_CODE (x
) == COND_EXEC
)
8516 x
= COND_EXEC_CODE (x
);
8517 if (GET_CODE (x
) == SET
)
8518 return ia64_spec_check_src_p (SET_SRC (x
));
8522 /* Return nonzero, if SRC belongs to recovery check. */
8524 ia64_spec_check_src_p (rtx src
)
8526 if (GET_CODE (src
) == IF_THEN_ELSE
)
8531 if (GET_CODE (t
) == NE
)
8535 if (GET_CODE (t
) == UNSPEC
)
8541 if (code
== UNSPEC_LDCCLR
8542 || code
== UNSPEC_LDCNC
8543 || code
== UNSPEC_CHKACLR
8544 || code
== UNSPEC_CHKANC
8545 || code
== UNSPEC_CHKS
)
8547 gcc_assert (code
!= 0);
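
/* Illustrative sketch (not part of the original sources) of the SET_SRC
   shape recognized above, e.g. for a clearing check load (ld.c.clr):

	(if_then_else (ne (unspec [(reg)] UNSPEC_LDCCLR) (const_int 0))
		      (mem ...)
		      (reg ...))

   XEXP (src, 0) is the NE whose first operand is the UNSPEC; the UNSPEC
   code identifies which kind of recovery check this is.  */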
/* The following page contains abstract data `bundle states' which are
   used for bundling insns (inserting nops and template generation).  */

/* The following describes state of insn bundling.  */

struct bundle_state
{
  /* Unique bundle state number to identify them in the debugging
     output  */
  int unique_num;
  rtx_insn *insn;     /* corresponding insn, NULL for the 1st and the last state  */
  /* number nops before and after the insn  */
  short before_nops_num, after_nops_num;
  int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
		   insn)  */
  int cost;     /* cost of the state in cycles */
  int accumulated_insns_num; /* number of all previous insns including
				nops.  L is considered as 2 insns */
  int branch_deviation; /* deviation of previous branches from 3rd slots  */
  int middle_bundle_stops; /* number of stop bits in the middle of bundles */
  struct bundle_state *next;  /* next state with the same insn_num  */
  struct bundle_state *originator; /* originator (previous insn state)  */
  /* All bundle states are in the following chain.  */
  struct bundle_state *allocated_states_chain;
  /* The DFA State after issuing the insn and the nops.  */
  state_t dfa_state;
};

/* The following is map insn number to the corresponding bundle state.  */

static struct bundle_state **index_to_bundle_states;

/* The unique number of next bundle state.  */

static int bundle_states_num;

/* All allocated bundle states are in the following chain.  */

static struct bundle_state *allocated_bundle_states_chain;

/* All allocated but not used bundle states are in the following
   chain.  */

static struct bundle_state *free_bundle_state_chain;
/* The following function returns a free bundle state.  */

static struct bundle_state *
get_free_bundle_state (void)
{
  struct bundle_state *result;

  if (free_bundle_state_chain != NULL)
    {
      result = free_bundle_state_chain;
      free_bundle_state_chain = result->next;
    }
  else
    {
      result = XNEW (struct bundle_state);
      result->dfa_state = xmalloc (dfa_state_size);
      result->allocated_states_chain = allocated_bundle_states_chain;
      allocated_bundle_states_chain = result;
    }
  result->unique_num = bundle_states_num++;
  return result;
}

/* The following function frees given bundle state.  */

static void
free_bundle_state (struct bundle_state *state)
{
  state->next = free_bundle_state_chain;
  free_bundle_state_chain = state;
}
8636 /* Start work with abstract data `bundle states'. */
8639 initiate_bundle_states (void)
8641 bundle_states_num
= 0;
8642 free_bundle_state_chain
= NULL
;
8643 allocated_bundle_states_chain
= NULL
;
8646 /* Finish work with abstract data `bundle states'. */
8649 finish_bundle_states (void)
8651 struct bundle_state
*curr_state
, *next_state
;
8653 for (curr_state
= allocated_bundle_states_chain
;
8655 curr_state
= next_state
)
8657 next_state
= curr_state
->allocated_states_chain
;
8658 free (curr_state
->dfa_state
);
/* Hashtable helpers.  */

struct bundle_state_hasher : nofree_ptr_hash <bundle_state>
{
  static inline hashval_t hash (const bundle_state *);
  static inline bool equal (const bundle_state *, const bundle_state *);
};

/* The function returns hash of BUNDLE_STATE.  */

hashval_t
bundle_state_hasher::hash (const bundle_state *state)
{
  unsigned result, i;

  for (result = i = 0; i < dfa_state_size; i++)
    result += (((unsigned char *) state->dfa_state) [i]
               << ((i % CHAR_BIT) * 3 + CHAR_BIT));
  return result + state->insn_num;
}
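/* A minimal stand-alone restatement of the hash above (illustrative
   only; hypothetical name, assumes a BUF of LEN bytes): each byte of
   the DFA state is added in at a byte-position-dependent shift, then
   the insn number is mixed in.  */
#if 0
static hashval_t
state_hash_sketch (const unsigned char *buf, size_t len, int insn_num)
{
  hashval_t result = 0;

  for (size_t i = 0; i < len; i++)
    result += buf[i] << ((i % CHAR_BIT) * 3 + CHAR_BIT);
  return result + insn_num;
}
#endif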
/* The function returns nonzero if the bundle state keys are equal.  */

bool
bundle_state_hasher::equal (const bundle_state *state1,
                            const bundle_state *state2)
{
  return (state1->insn_num == state2->insn_num
          && memcmp (state1->dfa_state, state2->dfa_state,
                     dfa_state_size) == 0);
}

/* Hash table of the bundle states.  The key is dfa_state and insn_num
   of the bundle states.  */

static hash_table<bundle_state_hasher> *bundle_state_table;
/* The function inserts the BUNDLE_STATE into the hash table.  The
   function returns nonzero if the bundle has been inserted into the
   table.  The table contains the best bundle state with given key.  */

static int
insert_bundle_state (struct bundle_state *bundle_state)
{
  struct bundle_state **entry_ptr;

  entry_ptr = bundle_state_table->find_slot (bundle_state, INSERT);
  if (*entry_ptr == NULL)
    {
      bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
      index_to_bundle_states [bundle_state->insn_num] = bundle_state;
      *entry_ptr = bundle_state;
      return TRUE;
    }
  else if (bundle_state->cost < (*entry_ptr)->cost
           || (bundle_state->cost == (*entry_ptr)->cost
               && ((*entry_ptr)->accumulated_insns_num
                   > bundle_state->accumulated_insns_num
                   || ((*entry_ptr)->accumulated_insns_num
                       == bundle_state->accumulated_insns_num
                       && ((*entry_ptr)->branch_deviation
                           > bundle_state->branch_deviation
                           || ((*entry_ptr)->branch_deviation
                               == bundle_state->branch_deviation
                               && (*entry_ptr)->middle_bundle_stops
                               > bundle_state->middle_bundle_stops))))))
    {
      struct bundle_state temp;

      temp = **entry_ptr;
      **entry_ptr = *bundle_state;
      (*entry_ptr)->next = temp.next;
      *bundle_state = temp;
    }
  return FALSE;
}
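/* The replacement condition above is a lexicographic "better than"
   test.  An equivalent flattened sketch (hypothetical helper, not
   used by the port):  */
#if 0
static bool
bundle_state_better_p (const struct bundle_state *a,
                       const struct bundle_state *b)
{
  /* Compare cost first, then total issued insns (i.e. fewer nops),
     then branch deviation, then middle-of-bundle stop bits.  */
  if (a->cost != b->cost)
    return a->cost < b->cost;
  if (a->accumulated_insns_num != b->accumulated_insns_num)
    return a->accumulated_insns_num < b->accumulated_insns_num;
  if (a->branch_deviation != b->branch_deviation)
    return a->branch_deviation < b->branch_deviation;
  return a->middle_bundle_stops < b->middle_bundle_stops;
}
#endif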
/* Start work with the hash table.  */

static void
initiate_bundle_state_table (void)
{
  bundle_state_table = new hash_table<bundle_state_hasher> (50);
}

/* Finish work with the hash table.  */

static void
finish_bundle_state_table (void)
{
  delete bundle_state_table;
  bundle_state_table = NULL;
}
/* The following variable is an insn `nop' used to check bundle states
   with different numbers of inserted nops.  */

static rtx_insn *ia64_nop;
/* The following function tries to issue NOPS_NUM nops for the current
   state without advancing processor cycle.  If it failed, the
   function returns FALSE and frees the current state.  */

static int
try_issue_nops (struct bundle_state *curr_state, int nops_num)
{
  int i;

  for (i = 0; i < nops_num; i++)
    if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
      {
        free_bundle_state (curr_state);
        return FALSE;
      }
  return TRUE;
}

/* The following function tries to issue INSN for the current
   state without advancing processor cycle.  If it failed, the
   function returns FALSE and frees the current state.  */

static int
try_issue_insn (struct bundle_state *curr_state, rtx insn)
{
  if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
    {
      free_bundle_state (curr_state);
      return FALSE;
    }
  return TRUE;
}
/* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
   starting with ORIGINATOR without advancing processor cycle.  If
   TRY_BUNDLE_END_P is TRUE, the function also/only (if
   ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill all bundle.
   If it was successful, the function creates new bundle state and
   inserts it into the hash table and into `index_to_bundle_states'.  */

static void
issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
                     rtx_insn *insn, int try_bundle_end_p,
                     int only_bundle_end_p)
{
  struct bundle_state *curr_state;

  curr_state = get_free_bundle_state ();
  memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
  curr_state->insn = insn;
  curr_state->insn_num = originator->insn_num + 1;
  curr_state->cost = originator->cost;
  curr_state->originator = originator;
  curr_state->before_nops_num = before_nops_num;
  curr_state->after_nops_num = 0;
  curr_state->accumulated_insns_num
    = originator->accumulated_insns_num + before_nops_num;
  curr_state->branch_deviation = originator->branch_deviation;
  curr_state->middle_bundle_stops = originator->middle_bundle_stops;
  gcc_assert (insn);
  if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
    {
      gcc_assert (GET_MODE (insn) != TImode);
      if (!try_issue_nops (curr_state, before_nops_num))
        return;
      if (!try_issue_insn (curr_state, insn))
        return;
      memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
      if (curr_state->accumulated_insns_num % 3 != 0)
        curr_state->middle_bundle_stops++;
      if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
          && curr_state->accumulated_insns_num % 3 != 0)
        {
          free_bundle_state (curr_state);
          return;
        }
    }
  else if (GET_MODE (insn) != TImode)
    {
      if (!try_issue_nops (curr_state, before_nops_num))
        return;
      if (!try_issue_insn (curr_state, insn))
        return;
      curr_state->accumulated_insns_num++;
      gcc_assert (!unknown_for_bundling_p (insn));

      if (ia64_safe_type (insn) == TYPE_L)
        curr_state->accumulated_insns_num++;
    }
  else
    {
      /* If this is an insn that must be first in a group, then don't allow
         nops to be emitted before it.  Currently, alloc is the only such
         supported instruction.  */
      /* ??? The bundling automatons should handle this for us, but they do
         not yet have support for the first_insn attribute.  */
      if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
        {
          free_bundle_state (curr_state);
          return;
        }

      state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
      state_transition (curr_state->dfa_state, NULL);
      curr_state->cost++;
      if (!try_issue_nops (curr_state, before_nops_num))
        return;
      if (!try_issue_insn (curr_state, insn))
        return;
      curr_state->accumulated_insns_num++;
      if (unknown_for_bundling_p (insn))
        {
          /* Finish bundle containing asm insn.  */
          curr_state->after_nops_num
            = 3 - curr_state->accumulated_insns_num % 3;
          curr_state->accumulated_insns_num
            += 3 - curr_state->accumulated_insns_num % 3;
        }
      else if (ia64_safe_type (insn) == TYPE_L)
        curr_state->accumulated_insns_num++;
    }
  if (ia64_safe_type (insn) == TYPE_B)
    curr_state->branch_deviation
      += 2 - (curr_state->accumulated_insns_num - 1) % 3;
  if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
    {
      if (!only_bundle_end_p && insert_bundle_state (curr_state))
        {
          state_t dfa_state;
          struct bundle_state *curr_state1;
          struct bundle_state *allocated_states_chain;

          curr_state1 = get_free_bundle_state ();
          dfa_state = curr_state1->dfa_state;
          allocated_states_chain = curr_state1->allocated_states_chain;
          *curr_state1 = *curr_state;
          curr_state1->dfa_state = dfa_state;
          curr_state1->allocated_states_chain = allocated_states_chain;
          memcpy (curr_state1->dfa_state, curr_state->dfa_state,
                  dfa_state_size);
          curr_state = curr_state1;
        }
      if (!try_issue_nops (curr_state,
                           3 - curr_state->accumulated_insns_num % 3))
        return;
      curr_state->after_nops_num
        = 3 - curr_state->accumulated_insns_num % 3;
      curr_state->accumulated_insns_num
        += 3 - curr_state->accumulated_insns_num % 3;
    }
  if (!insert_bundle_state (curr_state))
    free_bundle_state (curr_state);
  return;
}
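/* Worked example of the `3 - accumulated_insns_num % 3' padding used
   above: with 7 slots already accounted for, 7 % 3 == 1, so 3 - 1 == 2
   nops close the current bundle and bring the total to 9, a multiple
   of the three slots per bundle.  An L insn counts as two slots, so
   the same arithmetic still lands on bundle boundaries.  */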
/* The following function returns the position in the two-bundle
   window for the given STATE.  */

static int
get_max_pos (state_t state)
{
  if (cpu_unit_reservation_p (state, pos_6))
    return 6;
  else if (cpu_unit_reservation_p (state, pos_5))
    return 5;
  else if (cpu_unit_reservation_p (state, pos_4))
    return 4;
  else if (cpu_unit_reservation_p (state, pos_3))
    return 3;
  else if (cpu_unit_reservation_p (state, pos_2))
    return 2;
  else if (cpu_unit_reservation_p (state, pos_1))
    return 1;
  else
    return 0;
}
/* The function returns the code of a possible template for the given
   position and state.  The function should be called only with 2 values of
   position equal to 3 or 6.  We avoid generating F NOPs by putting
   templates containing F insns at the end of the template search
   because of an undocumented anomaly in McKinley-derived cores which can
   cause stalls if an F-unit insn (including a NOP) is issued within a
   six-cycle window after reading certain application registers (such
   as ar.bsp).  Furthermore, power considerations also argue against
   the use of F-unit instructions unless they're really needed.  */

static int
get_template (state_t state, int pos)
{
  switch (pos)
    {
    case 3:
      if (cpu_unit_reservation_p (state, _0mmi_))
        return 1;
      else if (cpu_unit_reservation_p (state, _0mii_))
        return 0;
      else if (cpu_unit_reservation_p (state, _0mmb_))
        return 7;
      else if (cpu_unit_reservation_p (state, _0mib_))
        return 6;
      else if (cpu_unit_reservation_p (state, _0mbb_))
        return 5;
      else if (cpu_unit_reservation_p (state, _0bbb_))
        return 4;
      else if (cpu_unit_reservation_p (state, _0mmf_))
        return 3;
      else if (cpu_unit_reservation_p (state, _0mfi_))
        return 2;
      else if (cpu_unit_reservation_p (state, _0mfb_))
        return 8;
      else if (cpu_unit_reservation_p (state, _0mlx_))
        return 9;
      else
        gcc_unreachable ();
    case 6:
      if (cpu_unit_reservation_p (state, _1mmi_))
        return 1;
      else if (cpu_unit_reservation_p (state, _1mii_))
        return 0;
      else if (cpu_unit_reservation_p (state, _1mmb_))
        return 7;
      else if (cpu_unit_reservation_p (state, _1mib_))
        return 6;
      else if (cpu_unit_reservation_p (state, _1mbb_))
        return 5;
      else if (cpu_unit_reservation_p (state, _1bbb_))
        return 4;
      else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
        return 3;
      else if (cpu_unit_reservation_p (state, _1mfi_))
        return 2;
      else if (cpu_unit_reservation_p (state, _1mfb_))
        return 8;
      else if (cpu_unit_reservation_p (state, _1mlx_))
        return 9;
      else
        gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
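/* For reference (this is an assumption from the bundle-selector naming
   used in this port, not restated in the function above): the template
   numbers returned correspond to 0 .mii, 1 .mmi, 2 .mfi, 3 .mmf,
   4 .bbb, 5 .mbb, 6 .mib, 7 .mmb, 8 .mfb, 9 .mlx.  The F-bearing
   templates (.mmf, .mfi, .mfb) are deliberately tried late, per the
   comment before the function.  */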
/* True when INSN is important for bundling.  */

static bool
important_for_bundling_p (rtx_insn *insn)
{
  return (INSN_P (insn)
          && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
          && GET_CODE (PATTERN (insn)) != USE
          && GET_CODE (PATTERN (insn)) != CLOBBER);
}

/* The following function returns an insn important for insn bundling
   followed by INSN and before TAIL.  */

static rtx_insn *
get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
{
  for (; insn && insn != tail; insn = NEXT_INSN (insn))
    if (important_for_bundling_p (insn))
      return insn;
  return NULL;
}

/* True when INSN is unknown, but important, for bundling.  */

static bool
unknown_for_bundling_p (rtx_insn *insn)
{
  return (INSN_P (insn)
          && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
          && GET_CODE (PATTERN (insn)) != USE
          && GET_CODE (PATTERN (insn)) != CLOBBER);
}

/* Add a bundle selector TEMPLATE0 before INSN.  */

static void
ia64_add_bundle_selector_before (int template0, rtx_insn *insn)
{
  rtx b = gen_bundle_selector (GEN_INT (template0));

  ia64_emit_insn_before (b, insn);
#if NR_BUNDLES == 10
  if ((template0 == 4 || template0 == 5)
      && ia64_except_unwind_info (&global_options) == UI_TARGET)
    {
      int i;
      rtx note = NULL_RTX;

      /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
         first or second slot.  If it is and has REG_EH_NOTE set, copy it
         to following nops, as br.call sets rp to the address of following
         bundle and therefore an EH region end must be on a bundle
         boundary.  */
      insn = PREV_INSN (insn);
      for (i = 0; i < 3; i++)
        {
          do
            insn = next_active_insn (insn);
          while (NONJUMP_INSN_P (insn)
                 && get_attr_empty (insn) == EMPTY_YES);
          if (CALL_P (insn))
            note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
          else if (note)
            {
              int code;

              gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
                          || code == CODE_FOR_nop_b);
              if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
                note = NULL_RTX;
              else
                add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
            }
        }
    }
#endif
}
/* The following function does insn bundling.  Bundling means
   inserting templates and nop insns to fit insn groups into permitted
   templates.  Instruction scheduling uses an NDFA (non-deterministic
   finite automaton) encoding information about the templates and the
   inserted nops.  Nondeterminism of the automaton makes it possible to
   follow all possible insn sequences very fast.

   Unfortunately it is not possible to get information about inserted
   nop insns and used templates from the automaton states.  The
   automaton only says that we can issue an insn, possibly inserting
   some nops before it and using some template.  Therefore insn
   bundling in this function is implemented by using a DFA
   (deterministic finite automaton).  We follow all possible insn
   sequences by inserting 0-2 nops (that is what the NDFA describes for
   insn scheduling) before/after each insn being bundled.  We know the
   start of a simulated processor cycle from insn scheduling (an insn
   starting a new cycle has TImode).

   A simple implementation of insn bundling would create an enormous
   number of possible insn sequences satisfying the information about new
   cycle ticks taken from the insn scheduling.  To make the algorithm
   practical we use dynamic programming.  Each decision (about
   inserting nops and implicitly about previous decisions) is described
   by structure bundle_state (see above).  If we generate the same
   bundle state (the key is the automaton state after issuing the insns
   and nops for it), we reuse the already generated one.  As a
   consequence we reject some decisions which cannot improve the
   solution and reduce memory for the algorithm.

   When we reach the end of EBB (extended basic block), we choose the
   best sequence and then, moving back in EBB, insert templates for
   the best alternative.  The templates are taken from querying
   automaton state for each insn in chosen bundle states.

   So the algorithm makes two (forward and backward) passes through
   EBB.  */
static void
bundling (FILE *dump, int verbose, rtx_insn *prev_head_insn, rtx_insn *tail)
{
  struct bundle_state *curr_state, *next_state, *best_state;
  rtx_insn *insn, *next_insn;
  int insn_num;
  int i, bundle_end_p, only_bundle_end_p, asm_p;
  int pos = 0, max_pos, template0, template1;
  rtx_insn *b;
  enum attr_type type;

  insn_num = 0;
  /* Count insns in the EBB.  */
  for (insn = NEXT_INSN (prev_head_insn);
       insn && insn != tail;
       insn = NEXT_INSN (insn))
    insn_num++;
  if (insn_num == 0)
    return;
  dfa_clean_insn_cache ();
  initiate_bundle_state_table ();
  index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
  /* First (forward) pass -- generation of bundle states.  */
  curr_state = get_free_bundle_state ();
  curr_state->insn = NULL;
  curr_state->before_nops_num = 0;
  curr_state->after_nops_num = 0;
  curr_state->insn_num = 0;
  curr_state->cost = 0;
  curr_state->accumulated_insns_num = 0;
  curr_state->branch_deviation = 0;
  curr_state->middle_bundle_stops = 0;
  curr_state->next = NULL;
  curr_state->originator = NULL;
  state_reset (curr_state->dfa_state);
  index_to_bundle_states [0] = curr_state;
  insn_num = 0;
  /* Shift cycle mark if it is put on insn which could be ignored.  */
  for (insn = NEXT_INSN (prev_head_insn);
       insn != tail;
       insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && !important_for_bundling_p (insn)
        && GET_MODE (insn) == TImode)
      {
        PUT_MODE (insn, VOIDmode);
        for (next_insn = NEXT_INSN (insn);
             next_insn != tail;
             next_insn = NEXT_INSN (next_insn))
          if (important_for_bundling_p (next_insn)
              && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
            {
              PUT_MODE (next_insn, TImode);
              break;
            }
      }
  /* Forward pass: generation of bundle states.  */
  for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
       insn != NULL_RTX;
       insn = next_insn)
    {
      gcc_assert (important_for_bundling_p (insn));
      type = ia64_safe_type (insn);
      next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
      insn_num++;
      index_to_bundle_states [insn_num] = NULL;
      for (curr_state = index_to_bundle_states [insn_num - 1];
           curr_state != NULL;
           curr_state = next_state)
        {
          pos = curr_state->accumulated_insns_num % 3;
          next_state = curr_state->next;
          /* We must fill up the current bundle in order to start a
             subsequent asm insn in a new bundle.  Asm insn is always
             placed in a separate bundle.  */
          only_bundle_end_p
            = (next_insn != NULL_RTX
               && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
               && unknown_for_bundling_p (next_insn));
          /* We may fill up the current bundle if it is the cycle end
             without a group barrier.  */
          bundle_end_p
            = (only_bundle_end_p || next_insn == NULL_RTX
               || (GET_MODE (next_insn) == TImode
                   && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
          if (type == TYPE_F || type == TYPE_B || type == TYPE_L
              || type == TYPE_S)
            issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
                                 only_bundle_end_p);
          issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
                               only_bundle_end_p);
          issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
                               only_bundle_end_p);
        }
      gcc_assert (index_to_bundle_states [insn_num]);
      for (curr_state = index_to_bundle_states [insn_num];
           curr_state != NULL;
           curr_state = curr_state->next)
        if (verbose >= 2 && dump)
          {
            /* This structure is taken from generated code of the
               pipeline hazard recognizer (see file insn-attrtab.c).
               Please don't forget to change the structure if a new
               automaton is added to .md file.  */
            struct DFA_chip
            {
              unsigned short one_automaton_state;
              unsigned short oneb_automaton_state;
              unsigned short two_automaton_state;
              unsigned short twob_automaton_state;
            };

            fprintf
              (dump,
               "//    Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
               curr_state->unique_num,
               (curr_state->originator == NULL
                ? -1 : curr_state->originator->unique_num),
               curr_state->cost,
               curr_state->before_nops_num, curr_state->after_nops_num,
               curr_state->accumulated_insns_num, curr_state->branch_deviation,
               curr_state->middle_bundle_stops,
               ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
               INSN_UID (insn));
          }
    }

  /* We should find a solution because the 2nd insn scheduling has
     found one.  */
  gcc_assert (index_to_bundle_states [insn_num]);
  /* Find a state corresponding to the best insn sequence.  */
  best_state = NULL;
  for (curr_state = index_to_bundle_states [insn_num];
       curr_state != NULL;
       curr_state = curr_state->next)
    /* We are just looking at the states with a fully filled up last
       bundle.  First we prefer insn sequences with minimal cost, then
       with minimal inserted nops, and finally with branch insns
       placed in the 3rd slots.  */
    if (curr_state->accumulated_insns_num % 3 == 0
        && (best_state == NULL || best_state->cost > curr_state->cost
            || (best_state->cost == curr_state->cost
                && (curr_state->accumulated_insns_num
                    < best_state->accumulated_insns_num
                    || (curr_state->accumulated_insns_num
                        == best_state->accumulated_insns_num
                        && (curr_state->branch_deviation
                            < best_state->branch_deviation
                            || (curr_state->branch_deviation
                                == best_state->branch_deviation
                                && curr_state->middle_bundle_stops
                                < best_state->middle_bundle_stops)))))))
      best_state = curr_state;
  /* Second (backward) pass: adding nops and templates.  */
  gcc_assert (best_state);
  insn_num = best_state->before_nops_num;
  template0 = template1 = -1;
  for (curr_state = best_state;
       curr_state->originator != NULL;
       curr_state = curr_state->originator)
    {
      insn = curr_state->insn;
      asm_p = unknown_for_bundling_p (insn);
      insn_num++;
      if (verbose >= 2 && dump)
        {
          struct DFA_chip
          {
            unsigned short one_automaton_state;
            unsigned short oneb_automaton_state;
            unsigned short two_automaton_state;
            unsigned short twob_automaton_state;
          };

          fprintf
            (dump,
             "//    Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
             curr_state->unique_num,
             (curr_state->originator == NULL
              ? -1 : curr_state->originator->unique_num),
             curr_state->cost,
             curr_state->before_nops_num, curr_state->after_nops_num,
             curr_state->accumulated_insns_num, curr_state->branch_deviation,
             curr_state->middle_bundle_stops,
             ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
             INSN_UID (insn));
        }
      /* Find the position in the current bundle window.  The window can
         contain at most two bundles.  Two bundle window means that
         the processor will make two bundle rotations.  */
      max_pos = get_max_pos (curr_state->dfa_state);
      if (max_pos == 6
          /* The following (negative template number) means that the
             processor did one bundle rotation.  */
          || (max_pos == 3 && template0 < 0))
        {
          /* We are at the end of the window -- find template(s) for
             its bundle(s).  */
          pos = max_pos;
          if (max_pos == 3)
            template0 = get_template (curr_state->dfa_state, 3);
          else
            {
              template1 = get_template (curr_state->dfa_state, 3);
              template0 = get_template (curr_state->dfa_state, 6);
            }
        }
      if (max_pos > 3 && template1 < 0)
        /* It may happen when we have the stop inside a bundle.  */
        {
          gcc_assert (pos <= 3);
          template1 = get_template (curr_state->dfa_state, 3);
          pos += 3;
        }
      if (!asm_p)
        /* Emit nops after the current insn.  */
        for (i = 0; i < curr_state->after_nops_num; i++)
          {
            rtx nop_pat = gen_nop ();
            rtx_insn *nop = emit_insn_after (nop_pat, insn);
            pos--;
            gcc_assert (pos >= 0);
            if (pos % 3 == 0)
              {
                /* We are at the start of a bundle: emit the template
                   (it should be defined).  */
                gcc_assert (template0 >= 0);
                ia64_add_bundle_selector_before (template0, nop);
                /* If we have two bundle window, we make one bundle
                   rotation.  Otherwise template0 will be undefined
                   (negative value).  */
                template0 = template1;
                template1 = -1;
              }
          }
      /* Move the position backward in the window.  Group barrier has
         no slot.  Asm insn takes all bundle.  */
      if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
          && !unknown_for_bundling_p (insn))
        pos--;
      /* Long insn takes 2 slots.  */
      if (ia64_safe_type (insn) == TYPE_L)
        pos--;
      gcc_assert (pos >= 0);
      if (pos % 3 == 0
          && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
          && !unknown_for_bundling_p (insn))
        {
          /* The current insn is at the bundle start: emit the
             template.  */
          gcc_assert (template0 >= 0);
          ia64_add_bundle_selector_before (template0, insn);
          b = PREV_INSN (insn);
          insn = b;
          /* See comment above in analogous place for emitting nops
             after the insn.  */
          template0 = template1;
          template1 = -1;
        }
      /* Emit nops before the current insn.  */
      for (i = 0; i < curr_state->before_nops_num; i++)
        {
          rtx nop_pat = gen_nop ();
          ia64_emit_insn_before (nop_pat, insn);
          rtx_insn *nop = PREV_INSN (insn);
          insn = nop;
          pos--;
          gcc_assert (pos >= 0);
          if (pos % 3 == 0)
            {
              /* See comment above in analogous place for emitting nops
                 after the insn.  */
              gcc_assert (template0 >= 0);
              ia64_add_bundle_selector_before (template0, insn);
              b = PREV_INSN (insn);
              insn = b;
              template0 = template1;
              template1 = -1;
            }
        }
    }

  if (flag_checking)
    {
      /* Assert right calculation of middle_bundle_stops.  */
      int num = best_state->middle_bundle_stops;
      bool start_bundle = true, end_bundle = false;

      for (insn = NEXT_INSN (prev_head_insn);
           insn && insn != tail;
           insn = NEXT_INSN (insn))
        {
          if (!INSN_P (insn))
            continue;
          if (recog_memoized (insn) == CODE_FOR_bundle_selector)
            start_bundle = true;
          else
            {
              rtx_insn *next_insn;

              for (next_insn = NEXT_INSN (insn);
                   next_insn && next_insn != tail;
                   next_insn = NEXT_INSN (next_insn))
                if (INSN_P (next_insn)
                    && (ia64_safe_itanium_class (next_insn)
                        != ITANIUM_CLASS_IGNORE
                        || recog_memoized (next_insn)
                        == CODE_FOR_bundle_selector)
                    && GET_CODE (PATTERN (next_insn)) != USE
                    && GET_CODE (PATTERN (next_insn)) != CLOBBER)
                  break;

              end_bundle = next_insn == NULL_RTX
                || next_insn == tail
                || (INSN_P (next_insn)
                    && recog_memoized (next_insn) == CODE_FOR_bundle_selector);
              if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
                  && !start_bundle && !end_bundle
                  && next_insn
                  && !unknown_for_bundling_p (next_insn))
                num--;

              start_bundle = false;
            }
        }

      gcc_assert (num == 0);
    }

  free (index_to_bundle_states);
  finish_bundle_state_table ();
  dfa_clean_insn_cache ();
}
/* The following function is called at the end of scheduling BB or
   EBB.  After reload, it inserts stop bits and does insn bundling.  */

static void
ia64_sched_finish (FILE *dump, int sched_verbose)
{
  if (sched_verbose)
    fprintf (dump, "// Finishing schedule.\n");
  if (!reload_completed)
    return;
  if (reload_completed)
    {
      final_emit_insn_group_barriers (dump);
      bundling (dump, sched_verbose, current_sched_info->prev_head,
                current_sched_info->next_tail);
      if (sched_verbose && dump)
        fprintf (dump, "// finishing %d-%d\n",
                 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
                 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
    }
}
/* The following function inserts stop bits in scheduled BB or EBB.  */

static void
final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
{
  rtx_insn *insn;
  int need_barrier_p = 0;
  int seen_good_insn = 0;

  init_insn_group_barriers ();

  for (insn = NEXT_INSN (current_sched_info->prev_head);
       insn != current_sched_info->next_tail;
       insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
        {
          rtx_insn *last = prev_active_insn (insn);

          if (! last)
            continue;
          if (JUMP_TABLE_DATA_P (last))
            last = prev_active_insn (last);
          if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
            emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);

          init_insn_group_barriers ();
          seen_good_insn = 0;
          need_barrier_p = 0;
        }
      else if (NONDEBUG_INSN_P (insn))
        {
          if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
            {
              init_insn_group_barriers ();
              seen_good_insn = 0;
              need_barrier_p = 0;
            }
          else if (need_barrier_p || group_barrier_needed (insn)
                   || (mflag_sched_stop_bits_after_every_cycle
                       && GET_MODE (insn) == TImode
                       && seen_good_insn))
            {
              if (TARGET_EARLY_STOP_BITS)
                {
                  rtx_insn *last;

                  for (last = insn;
                       last != current_sched_info->prev_head;
                       last = PREV_INSN (last))
                    if (INSN_P (last) && GET_MODE (last) == TImode
                        && stops_p [INSN_UID (last)])
                      break;
                  if (last == current_sched_info->prev_head)
                    last = insn;
                  last = prev_active_insn (last);
                  if (last
                      && recog_memoized (last) != CODE_FOR_insn_group_barrier)
                    emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
                                     last);
                  init_insn_group_barriers ();
                  for (last = NEXT_INSN (last);
                       last != insn;
                       last = NEXT_INSN (last))
                    if (INSN_P (last))
                      {
                        group_barrier_needed (last);
                        if (recog_memoized (last) >= 0
                            && important_for_bundling_p (last))
                          seen_good_insn = 1;
                      }
                }
              else
                {
                  emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
                                    insn);
                  init_insn_group_barriers ();
                  seen_good_insn = 0;
                }
              group_barrier_needed (insn);
              if (recog_memoized (insn) >= 0
                  && important_for_bundling_p (insn))
                seen_good_insn = 1;
            }
          else if (recog_memoized (insn) >= 0
                   && important_for_bundling_p (insn))
            seen_good_insn = 1;
          need_barrier_p = (CALL_P (insn) || unknown_for_bundling_p (insn));
        }
    }
}
/* If the following function returns TRUE, we will use the DFA
   insn scheduler.  */

static int
ia64_first_cycle_multipass_dfa_lookahead (void)
{
  return (reload_completed ? 6 : 4);
}

/* The following function initiates variable `dfa_pre_cycle_insn'.  */

static void
ia64_init_dfa_pre_cycle_insn (void)
{
  if (temp_dfa_state == NULL)
    {
      dfa_state_size = state_size ();
      temp_dfa_state = xmalloc (dfa_state_size);
      prev_cycle_state = xmalloc (dfa_state_size);
    }
  dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
  SET_PREV_INSN (dfa_pre_cycle_insn) = SET_NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
  recog_memoized (dfa_pre_cycle_insn);
  dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
  SET_PREV_INSN (dfa_stop_insn) = SET_NEXT_INSN (dfa_stop_insn) = NULL_RTX;
  recog_memoized (dfa_stop_insn);
}

/* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
   used by the DFA insn scheduler.  */

static rtx
ia64_dfa_pre_cycle_insn (void)
{
  return dfa_pre_cycle_insn;
}
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces address for CONSUMER (of type st or stf).  */

int
ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx dest, reg, mem;

  gcc_assert (producer && consumer);
  dest = ia64_single_set (producer);
  gcc_assert (dest);
  reg = SET_DEST (dest);
  gcc_assert (reg);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (GET_CODE (reg) == REG);

  dest = ia64_single_set (consumer);
  gcc_assert (dest);
  mem = SET_DEST (dest);
  gcc_assert (mem && GET_CODE (mem) == MEM);
  return reg_mentioned_p (reg, mem);
}

/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces address for CONSUMER (of type ld or fld).  */

int
ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
{
  rtx dest, src, reg, mem;

  gcc_assert (producer && consumer);
  dest = ia64_single_set (producer);
  gcc_assert (dest);
  reg = SET_DEST (dest);
  gcc_assert (reg);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (GET_CODE (reg) == REG);

  src = ia64_single_set (consumer);
  gcc_assert (src);
  mem = SET_SRC (src);
  gcc_assert (mem);

  if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
    mem = XVECEXP (mem, 0, 0);
  else if (GET_CODE (mem) == IF_THEN_ELSE)
    /* ??? Is this bypass necessary for ld.c?  */
    {
      gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
      mem = XEXP (mem, 1);
    }

  while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  if (GET_CODE (mem) == UNSPEC)
    {
      int c = XINT (mem, 1);

      gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
                  || c == UNSPEC_LDSA);
      mem = XVECEXP (mem, 0, 0);
    }

  /* Note that LO_SUM is used for GOT loads.  */
  gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);

  return reg_mentioned_p (reg, mem);
}
/* The following function returns TRUE if INSN produces address for a
   load/store insn.  We will place such insns into M slot because it
   decreases its latency time.  */

int
ia64_produce_address_p (rtx insn)
{
  return insn->call;
}
/* Emit pseudo-ops for the assembler to describe predicate relations.
   At present this assumes that we only consider predicate pairs to
   be mutex, and that the assembler can deduce proper values from
   straight-line code.  */

static void
emit_predicate_relation_info (void)
{
  basic_block bb;

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      int r;
      rtx_insn *head = BB_HEAD (bb);

      /* We only need such notes at code labels.  */
      if (! LABEL_P (head))
        continue;
      if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
        head = NEXT_INSN (head);

      /* Skip p0, which may be thought to be live due to (reg:DI p0)
         grabbing the entire block of predicate registers.  */
      for (r = PR_REG (2); r < PR_REG (64); r += 2)
        if (REGNO_REG_SET_P (df_get_live_in (bb), r))
          {
            rtx p = gen_rtx_REG (BImode, r);
            rtx_insn *n = emit_insn_after (gen_pred_rel_mutex (p), head);
            if (head == BB_END (bb))
              BB_END (bb) = n;
            head = n;
          }
    }

  /* Look for conditional calls that do not return, and protect predicate
     relations around them.  Otherwise the assembler will assume the call
     returns, and complain about uses of call-clobbered predicates after
     the call.  */
  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      rtx_insn *insn = BB_HEAD (bb);

      while (1)
        {
          if (CALL_P (insn)
              && GET_CODE (PATTERN (insn)) == COND_EXEC
              && find_reg_note (insn, REG_NORETURN, NULL_RTX))
            {
              rtx_insn *b =
                emit_insn_before (gen_safe_across_calls_all (), insn);
              rtx_insn *a = emit_insn_after (gen_safe_across_calls_normal (), insn);
              if (BB_HEAD (bb) == insn)
                BB_HEAD (bb) = b;
              if (BB_END (bb) == insn)
                BB_END (bb) = a;
            }

          if (insn == BB_END (bb))
            break;
          insn = NEXT_INSN (insn);
        }
    }
}
/* Perform machine dependent operations on the rtl chain INSNS.  */

static void
ia64_reorg (void)
{
  /* We are freeing block_for_insn in the toplev to keep compatibility
     with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
  compute_bb_for_insn ();

  /* If optimizing, we'll have split before scheduling.  */
  if (optimize == 0)
    split_all_insns ();

  if (optimize && flag_schedule_insns_after_reload
      && dbg_cnt (ia64_sched2))
    {
      basic_block bb;
      timevar_push (TV_SCHED2);
      ia64_final_schedule = 1;

      /* We can't let modulo-sched prevent us from scheduling any bbs,
         since we need the final schedule to produce bundle information.  */
      FOR_EACH_BB_FN (bb, cfun)
        bb->flags &= ~BB_DISABLE_SCHEDULE;

      initiate_bundle_states ();
      ia64_nop = make_insn_raw (gen_nop ());
      SET_PREV_INSN (ia64_nop) = SET_NEXT_INSN (ia64_nop) = NULL_RTX;
      recog_memoized (ia64_nop);
      clocks_length = get_max_uid () + 1;
      stops_p = XCNEWVEC (char, clocks_length);

      if (ia64_tune == PROCESSOR_ITANIUM2)
        {
          pos_1 = get_cpu_unit_code ("2_1");
          pos_2 = get_cpu_unit_code ("2_2");
          pos_3 = get_cpu_unit_code ("2_3");
          pos_4 = get_cpu_unit_code ("2_4");
          pos_5 = get_cpu_unit_code ("2_5");
          pos_6 = get_cpu_unit_code ("2_6");
          _0mii_ = get_cpu_unit_code ("2b_0mii.");
          _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
          _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
          _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
          _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
          _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
          _0mib_ = get_cpu_unit_code ("2b_0mib.");
          _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
          _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
          _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
          _1mii_ = get_cpu_unit_code ("2b_1mii.");
          _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
          _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
          _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
          _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
          _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
          _1mib_ = get_cpu_unit_code ("2b_1mib.");
          _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
          _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
          _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
        }
      else
        {
          pos_1 = get_cpu_unit_code ("1_1");
          pos_2 = get_cpu_unit_code ("1_2");
          pos_3 = get_cpu_unit_code ("1_3");
          pos_4 = get_cpu_unit_code ("1_4");
          pos_5 = get_cpu_unit_code ("1_5");
          pos_6 = get_cpu_unit_code ("1_6");
          _0mii_ = get_cpu_unit_code ("1b_0mii.");
          _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
          _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
          _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
          _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
          _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
          _0mib_ = get_cpu_unit_code ("1b_0mib.");
          _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
          _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
          _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
          _1mii_ = get_cpu_unit_code ("1b_1mii.");
          _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
          _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
          _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
          _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
          _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
          _1mib_ = get_cpu_unit_code ("1b_1mib.");
          _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
          _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
          _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
        }

      if (flag_selective_scheduling2
          && !maybe_skip_selective_scheduling ())
        run_selective_scheduling ();
      else
        schedule_ebbs ();

      /* Redo alignment computation, as it might have gone wrong.  */
      compute_alignments ();

      /* We cannot reuse this one because it has been corrupted by the
         evil glat.  */
      finish_bundle_states ();
      free (stops_p);
      stops_p = NULL;
      emit_insn_group_barriers (dump_file);

      ia64_final_schedule = 0;
      timevar_pop (TV_SCHED2);
    }
  else
    emit_all_insn_group_barriers (dump_file);

  /* A call must not be the last instruction in a function, so that the
     return address is still within the function, so that unwinding works
     properly.  Note that IA-64 differs from dwarf2 on this point.  */
  if (ia64_except_unwind_info (&global_options) == UI_TARGET)
    {
      rtx_insn *insn;
      int saw_stop = 0;

      insn = get_last_insn ();
      if (! INSN_P (insn))
        insn = prev_active_insn (insn);
      if (insn)
        {
          /* Skip over insns that expand to nothing.  */
          while (NONJUMP_INSN_P (insn)
                 && get_attr_empty (insn) == EMPTY_YES)
            {
              if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
                  && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
                saw_stop = 1;
              insn = prev_active_insn (insn);
            }
          if (CALL_P (insn))
            {
              if (! saw_stop)
                emit_insn (gen_insn_group_barrier (GEN_INT (3)));
              emit_insn (gen_break_f ());
              emit_insn (gen_insn_group_barrier (GEN_INT (3)));
            }
        }
    }

  emit_predicate_relation_info ();

  if (flag_var_tracking)
    {
      timevar_push (TV_VAR_TRACKING);
      variable_tracking_main ();
      timevar_pop (TV_VAR_TRACKING);
    }
  df_finish_pass (false);
}
/* Return true if REGNO is used by the epilogue.  */

int
ia64_epilogue_uses (int regno)
{
  switch (regno)
    {
    case R_GR (1):
      /* With a call to a function in another module, we will write a new
         value to "gp".  After returning from such a call, we need to make
         sure the function restores the original gp-value, even if the
         function itself does not use the gp anymore.  */
      return !(TARGET_AUTO_PIC || TARGET_NO_PIC);

    case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
    case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
      /* For functions defined with the syscall_linkage attribute, all
         input registers are marked as live at all function exits.  This
         prevents the register allocator from using the input registers,
         which in turn makes it possible to restart a system call after
         an interrupt without having to save/restore the input registers.
         This also prevents kernel data from leaking to application code.  */
      return lookup_attribute ("syscall_linkage",
           TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;

    case R_BR (0):
      /* Conditional return patterns can't represent the use of `b0' as
         the return address, so we force the value live this way.  */
      return 1;

    case AR_PFS_REGNUM:
      /* Likewise for ar.pfs, which is used by br.ret.  */
      return 1;

    default:
      return 0;
    }
}
/* Return true if REGNO is used by the frame unwinder.  */

int
ia64_eh_uses (int regno)
{
  unsigned int r;

  if (! reload_completed)
    return 0;

  if (regno == 0)
    return 0;

  for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
    if (regno == current_frame_info.r[r]
        || regno == emitted_frame_related_regs[r])
      return 1;

  return 0;
}
9984 /* ??? We could also support own long data here. Generating movl/add/ld8
9985 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9986 code faster because there is one less load. This also includes incomplete
9987 types which can't go in sdata/sbss. */
9990 ia64_in_small_data_p (const_tree exp
)
9992 if (TARGET_NO_SDATA
)
9995 /* We want to merge strings, so we never consider them small data. */
9996 if (TREE_CODE (exp
) == STRING_CST
)
9999 /* Functions are never small data. */
10000 if (TREE_CODE (exp
) == FUNCTION_DECL
)
10003 if (TREE_CODE (exp
) == VAR_DECL
&& DECL_SECTION_NAME (exp
))
10005 const char *section
= DECL_SECTION_NAME (exp
);
10007 if (strcmp (section
, ".sdata") == 0
10008 || strncmp (section
, ".sdata.", 7) == 0
10009 || strncmp (section
, ".gnu.linkonce.s.", 16) == 0
10010 || strcmp (section
, ".sbss") == 0
10011 || strncmp (section
, ".sbss.", 6) == 0
10012 || strncmp (section
, ".gnu.linkonce.sb.", 17) == 0)
10017 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (exp
));
10019 /* If this is an incomplete type with size 0, then we can't put it
10020 in sdata because it might be too big when completed. */
10021 if (size
> 0 && size
<= ia64_section_threshold
)
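/* Example (assuming the default 8-byte threshold when -G is not
   given): a file-scope `int' is 4 bytes, passes the
   `size <= ia64_section_threshold' test above and is placed in
   sdata/sbss, reachable with a single gp-relative addl; a 64-byte
   array fails the test and stays in ordinary data/bss.  */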
/* Output assembly directives for prologue regions.  */

/* True if the current basic block is the last one in the function.  */

static bool last_block;

/* True if we need a copy_state command at the start of the next block.  */

static bool need_copy_state;

#ifndef MAX_ARTIFICIAL_LABEL_BYTES
# define MAX_ARTIFICIAL_LABEL_BYTES 30
#endif
/* The function emits unwind directives for the start of an epilogue.  */

static void
process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
                  bool unwind, bool frame ATTRIBUTE_UNUSED)
{
  /* If this isn't the last block of the function, then we need to label the
     current state, and copy it back in at the start of the next block.  */

  if (!last_block)
    {
      if (unwind)
        fprintf (asm_out_file, "\t.label_state %d\n",
                 ++cfun->machine->state_num);
      need_copy_state = true;
    }

  if (unwind)
    fprintf (asm_out_file, "\t.restore sp\n");
}
/* This function processes a SET pattern for REG_CFA_ADJUST_CFA.  */

static void
process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
                        bool unwind, bool frame)
{
  rtx dest = SET_DEST (pat);
  rtx src = SET_SRC (pat);

  if (dest == stack_pointer_rtx)
    {
      if (GET_CODE (src) == PLUS)
        {
          rtx op0 = XEXP (src, 0);
          rtx op1 = XEXP (src, 1);

          gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);

          if (INTVAL (op1) < 0)
            {
              gcc_assert (!frame_pointer_needed);
              if (unwind)
                fprintf (asm_out_file,
                         "\t.fframe " HOST_WIDE_INT_PRINT_DEC "\n",
                         -INTVAL (op1));
            }
          else
            process_epilogue (asm_out_file, insn, unwind, frame);
        }
      else
        {
          gcc_assert (src == hard_frame_pointer_rtx);
          process_epilogue (asm_out_file, insn, unwind, frame);
        }
    }
  else if (dest == hard_frame_pointer_rtx)
    {
      gcc_assert (src == stack_pointer_rtx);
      gcc_assert (frame_pointer_needed);

      if (unwind)
        fprintf (asm_out_file, "\t.vframe r%d\n",
                 ia64_dbx_register_number (REGNO (dest)));
    }
  else
    gcc_unreachable ();
}
/* This function processes a SET pattern for REG_CFA_REGISTER.  */

static void
process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
{
  rtx dest = SET_DEST (pat);
  rtx src = SET_SRC (pat);
  int dest_regno = REGNO (dest);
  int src_regno;

  if (src == pc_rtx)
    {
      /* Saving return address pointer.  */
      if (unwind)
        fprintf (asm_out_file, "\t.save rp, r%d\n",
                 ia64_dbx_register_number (dest_regno));
      return;
    }

  src_regno = REGNO (src);

  switch (src_regno)
    {
    case PR_REG (0):
      gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
      if (unwind)
        fprintf (asm_out_file, "\t.save pr, r%d\n",
                 ia64_dbx_register_number (dest_regno));
      break;

    case AR_UNAT_REGNUM:
      gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
      if (unwind)
        fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
                 ia64_dbx_register_number (dest_regno));
      break;

    case AR_LC_REGNUM:
      gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
      if (unwind)
        fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
                 ia64_dbx_register_number (dest_regno));
      break;

    default:
      /* Everything else should indicate being stored to memory.  */
      gcc_unreachable ();
    }
}
/* This function processes a SET pattern for REG_CFA_OFFSET.  */

static void
process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
{
  rtx dest = SET_DEST (pat);
  rtx src = SET_SRC (pat);
  int src_regno = REGNO (src);
  const char *saveop;
  HOST_WIDE_INT off;
  rtx base;

  gcc_assert (MEM_P (dest));
  if (GET_CODE (XEXP (dest, 0)) == REG)
    {
      base = XEXP (dest, 0);
      off = 0;
    }
  else
    {
      gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
      base = XEXP (XEXP (dest, 0), 0);
      off = INTVAL (XEXP (XEXP (dest, 0), 1));
    }

  if (base == hard_frame_pointer_rtx)
    {
      saveop = ".savepsp";
      off = - off;
    }
  else
    {
      gcc_assert (base == stack_pointer_rtx);
      saveop = ".savesp";
    }

  src_regno = REGNO (src);
  switch (src_regno)
    {
    case BR_REG (0):
      gcc_assert (!current_frame_info.r[reg_save_b0]);
      if (unwind)
        fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
                 saveop, off);
      break;

    case PR_REG (0):
      gcc_assert (!current_frame_info.r[reg_save_pr]);
      if (unwind)
        fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
                 saveop, off);
      break;

    case AR_LC_REGNUM:
      gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
      if (unwind)
        fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
                 saveop, off);
      break;

    case AR_PFS_REGNUM:
      gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
      if (unwind)
        fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
                 saveop, off);
      break;

    case AR_UNAT_REGNUM:
      gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
      if (unwind)
        fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
                 saveop, off);
      break;

    case GR_REG (4): case GR_REG (5): case GR_REG (6): case GR_REG (7):
      if (unwind)
        fprintf (asm_out_file, "\t.save.g 0x%x\n",
                 1 << (src_regno - GR_REG (4)));
      break;

    case BR_REG (1): case BR_REG (2): case BR_REG (3):
    case BR_REG (4): case BR_REG (5):
      if (unwind)
        fprintf (asm_out_file, "\t.save.b 0x%x\n",
                 1 << (src_regno - BR_REG (1)));
      break;

    case FR_REG (2): case FR_REG (3): case FR_REG (4): case FR_REG (5):
      if (unwind)
        fprintf (asm_out_file, "\t.save.f 0x%x\n",
                 1 << (src_regno - FR_REG (2)));
      break;

    case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
    case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
    case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
    case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
      if (unwind)
        fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
                 1 << (src_regno - FR_REG (12)));
      break;

    default:
      /* ??? For some reason we mark other general registers, even those
         we can't represent in the unwind info.  Ignore them.  */
      break;
    }
}
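/* Worked example of the group-mask computation above: saving GR r6
   gives 1 << (GR_REG (6) - GR_REG (4)) == 1 << 2, so the emitted
   directive is `.save.g 0x4', i.e. bit 2 of the r4-r7 mask.  */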
/* This function looks at a single insn and emits any directives
   required to unwind this insn.  */

static void
ia64_asm_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
{
  bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
  bool frame = dwarf2out_do_frame ();
  rtx note, pat;
  bool handled_one;

  if (!unwind && !frame)
    return;

  if (NOTE_INSN_BASIC_BLOCK_P (insn))
    {
      last_block = NOTE_BASIC_BLOCK (insn)->next_bb
                   == EXIT_BLOCK_PTR_FOR_FN (cfun);

      /* Restore unwind state from immediately before the epilogue.  */
      if (need_copy_state)
        {
          if (unwind)
            {
              fprintf (asm_out_file, "\t.body\n");
              fprintf (asm_out_file, "\t.copy_state %d\n",
                       cfun->machine->state_num);
            }
          need_copy_state = false;
        }
    }

  if (NOTE_P (insn) || ! RTX_FRAME_RELATED_P (insn))
    return;

  /* Look for the ALLOC insn.  */
  if (INSN_CODE (insn) == CODE_FOR_alloc)
    {
      rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
      int dest_regno = REGNO (dest);

      /* If this is the final destination for ar.pfs, then this must
         be the alloc in the prologue.  */
      if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
        {
          if (unwind)
            fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
                     ia64_dbx_register_number (dest_regno));
        }
      else
        {
          /* This must be an alloc before a sibcall.  We must drop the
             old frame info.  The easiest way to drop the old frame
             info is to ensure we had a ".restore sp" directive
             followed by a new prologue.  If the procedure doesn't
             have a memory-stack frame, we'll issue a dummy ".restore
             sp" now.  */
          if (current_frame_info.total_size == 0 && !frame_pointer_needed)
            /* if haven't done process_epilogue() yet, do it now */
            process_epilogue (asm_out_file, insn, unwind, frame);
          if (unwind)
            fprintf (asm_out_file, "\t.prologue\n");
        }
      return;
    }

  handled_one = false;
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_CFA_ADJUST_CFA:
        pat = XEXP (note, 0);
        if (pat == NULL)
          pat = PATTERN (insn);
        process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
        handled_one = true;
        break;

      case REG_CFA_OFFSET:
        pat = XEXP (note, 0);
        if (pat == NULL)
          pat = PATTERN (insn);
        process_cfa_offset (asm_out_file, pat, unwind);
        handled_one = true;
        break;

      case REG_CFA_REGISTER:
        pat = XEXP (note, 0);
        if (pat == NULL)
          pat = PATTERN (insn);
        process_cfa_register (asm_out_file, pat, unwind);
        handled_one = true;
        break;

      case REG_FRAME_RELATED_EXPR:
      case REG_CFA_DEF_CFA:
      case REG_CFA_EXPRESSION:
      case REG_CFA_RESTORE:
      case REG_CFA_SET_VDRAP:
        /* Not used in the ia64 port.  */
        gcc_unreachable ();

      default:
        /* Not a frame-related note.  */
        break;
      }

  /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
     explicit action to take.  No guessing required.  */
  gcc_assert (handled_one);
}
/* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY.  */

static void
ia64_asm_emit_except_personality (rtx personality)
{
  fputs ("\t.personality\t", asm_out_file);
  output_addr_const (asm_out_file, personality);
  fputc ('\n', asm_out_file);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
ia64_asm_init_sections (void)
{
  exception_section = get_unnamed_section (0, output_section_asm_op,
                                           "\t.handlerdata");
}

/* Implement TARGET_DEBUG_UNWIND_INFO.  */

static enum unwind_info_type
ia64_debug_unwind_info (void)
{
  return UI_TARGET;
}
enum ia64_builtins
{
  IA64_BUILTIN_BSP,
  IA64_BUILTIN_COPYSIGNQ,
  IA64_BUILTIN_FABSQ,
  IA64_BUILTIN_FLUSHRS,
  IA64_BUILTIN_INFQ,
  IA64_BUILTIN_HUGE_VALQ,
  IA64_BUILTIN_NANQ,
  IA64_BUILTIN_NANSQ,
  IA64_BUILTIN_max
};

static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
static void
ia64_init_builtins (void)
{
  tree fpreg_type;
  tree float80_type;
  tree decl;

  /* The __fpreg type.  */
  fpreg_type = make_node (REAL_TYPE);
  TYPE_PRECISION (fpreg_type) = 82;
  layout_type (fpreg_type);
  (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");

  /* The __float80 type.  */
  if (float64x_type_node != NULL_TREE
      && TYPE_MODE (float64x_type_node) == XFmode)
    float80_type = float64x_type_node;
  else
    {
      float80_type = make_node (REAL_TYPE);
      TYPE_PRECISION (float80_type) = 80;
      layout_type (float80_type);
    }
  (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");

  /* The __float128 type.  */
  if (!TARGET_HPUX)
    {
      tree ftype;
      tree const_string_type
        = build_pointer_type (build_qualified_type
                              (char_type_node, TYPE_QUAL_CONST));

      (*lang_hooks.types.register_builtin_type) (float128_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (float128_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   IA64_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      ia64_builtins[IA64_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;

      ftype = build_function_type_list (float128_type_node,
                                        const_string_type,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_nanq", ftype,
                                   IA64_BUILTIN_NANQ, BUILT_IN_MD,
                                   "nanq", NULL_TREE);
      TREE_READONLY (decl) = 1;
      ia64_builtins[IA64_BUILTIN_NANQ] = decl;

      decl = add_builtin_function ("__builtin_nansq", ftype,
                                   IA64_BUILTIN_NANSQ, BUILT_IN_MD,
                                   "nansq", NULL_TREE);
      TREE_READONLY (decl) = 1;
      ia64_builtins[IA64_BUILTIN_NANSQ] = decl;

      ftype = build_function_type_list (float128_type_node,
                                        float128_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   IA64_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "__fabstf2", NULL_TREE);
      TREE_READONLY (decl) = 1;
      ia64_builtins[IA64_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (float128_type_node,
                                        float128_type_node,
                                        float128_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "__copysigntf3", NULL_TREE);
      TREE_READONLY (decl) = 1;
      ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
    }
  else
    /* Under HPUX, this is a synonym for "long double".  */
    (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                               "__float128");

  /* Fwrite on VMS is non-standard.  */
#if TARGET_ABI_OPEN_VMS
  vms_patch_builtins ();
#endif

#define def_builtin(name, type, code)					\
  add_builtin_function ((name), (type), (code), BUILT_IN_MD,		\
                        NULL, NULL_TREE)

  decl = def_builtin ("__builtin_ia64_bsp",
                      build_function_type_list (ptr_type_node, NULL_TREE),
                      IA64_BUILTIN_BSP);
  ia64_builtins[IA64_BUILTIN_BSP] = decl;

  decl = def_builtin ("__builtin_ia64_flushrs",
                      build_function_type_list (void_type_node, NULL_TREE),
                      IA64_BUILTIN_FLUSHRS);
  ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;

#undef def_builtin

  if (TARGET_HPUX)
    {
      if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
        set_user_assembler_name (decl, "_Isfinite");
      if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
        set_user_assembler_name (decl, "_Isfinitef");
      if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
        set_user_assembler_name (decl, "_Isfinitef128");
    }
}
static tree
ia64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
                   tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum ia64_builtins fn_code = (enum ia64_builtins)
                                   DECL_FUNCTION_CODE (fndecl);
      switch (fn_code)
        {
        case IA64_BUILTIN_NANQ:
        case IA64_BUILTIN_NANSQ:
          {
            tree type = TREE_TYPE (TREE_TYPE (fndecl));
            const char *str = c_getstr (*args);
            int quiet = fn_code == IA64_BUILTIN_NANQ;
            REAL_VALUE_TYPE real;

            if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
              return build_real (type, real);
            return NULL_TREE;
          }

        default:
          break;
        }
    }

#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#endif

  return NULL_TREE;
}
rtx
ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case IA64_BUILTIN_BSP:
      if (! target || ! register_operand (target, DImode))
        target = gen_reg_rtx (DImode);
      emit_insn (gen_bsp_value (target));
#ifdef POINTERS_EXTEND_UNSIGNED
      target = convert_memory_address (ptr_mode, target);
#endif
      return target;

    case IA64_BUILTIN_FLUSHRS:
      emit_insn (gen_flushrs ());
      return const0_rtx;

    case IA64_BUILTIN_INFQ:
    case IA64_BUILTIN_HUGE_VALQ:
      {
        machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = const_double_from_real_value (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    case IA64_BUILTIN_NANQ:
    case IA64_BUILTIN_NANSQ:
    case IA64_BUILTIN_FABSQ:
    case IA64_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Return the ia64 builtin for CODE.  */

static tree
ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= IA64_BUILTIN_max)
    return error_mark_node;

  return ia64_builtins[code];
}
/* Implement TARGET_FUNCTION_ARG_PADDING.

   For the HP-UX IA64 aggregate parameters are passed stored in the
   most significant bits of the stack slot.  */

static pad_direction
ia64_function_arg_padding (machine_mode mode, const_tree type)
{
  /* Exception to normal case for structures/unions/etc.  */
  if (TARGET_HPUX
      && type
      && AGGREGATE_TYPE_P (type)
      && int_size_in_bytes (type) < UNITS_PER_WORD)
    return PAD_UPWARD;

  /* Fall back to the default.  */
  return default_function_arg_padding (mode, type);
}
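/* Example: on HP-UX a 3-byte aggregate argument satisfies the test
   above and is padded upward, so it occupies the most significant
   bytes of its slot; everywhere else the default padding rule
   applies.  */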
/* Emit text to declare externally defined variables and functions, because
   the Intel assembler does not support undefined externals.  */

void
ia64_asm_output_external (FILE *file, tree decl, const char *name)
{
  /* We output the name if and only if TREE_SYMBOL_REFERENCED is
     set in order to avoid putting out names that are never really
     used.  */
  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
    {
      /* maybe_assemble_visibility will return 1 if the assembler
         visibility directive is output.  */
      int need_visibility = ((*targetm.binds_local_p) (decl)
                             && maybe_assemble_visibility (decl));

      /* GNU as does not need anything here, but the HP linker does
         need something for external functions.  */
      if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
          && TREE_CODE (decl) == FUNCTION_DECL)
        (*targetm.asm_out.globalize_decl_name) (file, decl);
      else if (need_visibility && !TARGET_GNU_AS)
        (*targetm.asm_out.globalize_label) (file, name);
    }
}
/* Set SImode div/mod functions, since init_integral_libfuncs only
   initializes modes of word_mode and larger.  Rename the TFmode libfuncs
   using the HPUX conventions.  __divtf3 is used for XFmode; we need to
   keep it for backward compatibility.  */

static void
ia64_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
  set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
  set_optab_libfunc (smod_optab, SImode, "__modsi3");
  set_optab_libfunc (umod_optab, SImode, "__umodsi3");

  set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
  set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
  set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
  set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
  set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

  set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
  set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
  set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
  set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");

  set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
  set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
  set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
  set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
  set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");

  set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
  /* HP-UX 11.23 libc does not have a function for unsigned
     SImode-to-TFmode conversion.  */
  set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
}

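/* As an illustration (not part of this file), with the registrations
   above a TFmode division such as

       __float128 f (__float128 a, __float128 b) { return a / b; }

   is emitted as a call to _U_Qfdiv rather than to libgcc's __divtf3,
   and an SImode division becomes a call to __divsi3.  */
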
/* Rename all the TFmode libfuncs using the HPUX conventions.  */

static void
ia64_hpux_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  /* The HP SI millicode division and mod functions expect DI arguments.
     By turning them off completely we avoid using both libgcc and the
     non-standard millicode routines and use the HP DI millicode routines
     instead.  */

  set_optab_libfunc (sdiv_optab, SImode, 0);
  set_optab_libfunc (udiv_optab, SImode, 0);
  set_optab_libfunc (smod_optab, SImode, 0);
  set_optab_libfunc (umod_optab, SImode, 0);

  set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
  set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
  set_optab_libfunc (smod_optab, DImode, "__milli_remI");
  set_optab_libfunc (umod_optab, DImode, "__milli_remU");

  /* HP-UX libc has TF min/max/abs routines in it.  */
  set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
  set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
  set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");

  /* ia64_expand_compare uses this.  */
  cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");

  /* These should never be used.  */
  set_optab_libfunc (eq_optab, TFmode, 0);
  set_optab_libfunc (ne_optab, TFmode, 0);
  set_optab_libfunc (gt_optab, TFmode, 0);
  set_optab_libfunc (ge_optab, TFmode, 0);
  set_optab_libfunc (lt_optab, TFmode, 0);
  set_optab_libfunc (le_optab, TFmode, 0);
}

/* Rename the division and modulus functions in VMS.  */

static void
ia64_vms_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
  set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
  set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
  set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
  set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
  set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
  set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
  set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
#ifdef MEM_LIBFUNCS_INIT
  MEM_LIBFUNCS_INIT;
#endif
}

/* Rename the TFmode libfuncs available from soft-fp in glibc using
   the HPUX conventions.  */

static void
ia64_sysv4_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  /* These functions are not part of the HPUX TFmode interface.  We
     use them instead of _U_Qfcmp, which doesn't work the way we
     expect.  */
  set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
  set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
  set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
  set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
  set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
  set_optab_libfunc (le_optab, TFmode, "_U_Qfle");

  /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
     glibc doesn't have them.  */
}

static void
ia64_soft_fp_init_libfuncs (void)
{
}

/* VMS supports both 32-bit and 64-bit pointers.  */

static bool
ia64_vms_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == SImode || mode == DImode);
}

/* For HPUX, it is illegal to have relocations in shared segments.  */

static int
ia64_hpux_reloc_rw_mask (void)
{
  return 3;
}

/* For others, relax this so that relocations to local data go into
   read-only segments, but we still cannot allow global relocations
   in read-only segments.  */

static int
ia64_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 2;
}

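/* A note on the mask bits (an interpretation of the hook's contract;
   see the TARGET_ASM_RELOC_RW_MASK documentation): bit 0 forces data
   needing relocations against local symbols into writable sections,
   bit 1 does the same for global symbols.  So 3 above keeps every
   relocated object out of read-only sections, while flag_pic ? 3 : 2
   permits locally resolved relocations in read-only data for non-PIC
   code.  */
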
/* Return the section to use for X.  The only special thing we do here
   is to honor small data.  */

static section *
ia64_select_rtx_section (machine_mode mode, rtx x,
                         unsigned HOST_WIDE_INT align)
{
  if (GET_MODE_SIZE (mode) > 0
      && GET_MODE_SIZE (mode) <= ia64_section_threshold
      && !TARGET_NO_SDATA)
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

static unsigned int
ia64_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strncmp (name, ".sdata2.", 8) == 0
      || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags (decl, name, reloc);
  return flags;
}

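/* For instance (illustrative): a variable placed in ".sdata.foo" by
   -fdata-sections receives SECTION_SMALL in addition to the default
   flags, marking it as reachable through short, gp-relative
   addressing just like plain ".sdata" contents.  */
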
/* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
   structure type whose address should be passed in out0, rather than
   in r8.  */

static bool
ia64_struct_retval_addr_is_first_parm_p (tree fntype)
{
  tree ret_type = TREE_TYPE (fntype);

  /* The Itanium C++ ABI requires that out0, rather than r8, be used
     as the structure return address parameter, if the return value
     type has a non-trivial copy constructor or destructor.  It is not
     clear if this same convention should be used for other
     programming languages.  Until G++ 3.4, we incorrectly used r8 for
     these return values.  */
  return (abi_version_at_least (2)
          && ret_type
          && TYPE_MODE (ret_type) == BLKmode
          && TREE_ADDRESSABLE (ret_type)
          && lang_GNU_CXX ());
}

/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this_rtx, funexp;
  rtx_insn *insn;
  unsigned int this_parmno;
  unsigned int this_regno;
  rtx delta_rtx;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Set things up as ia64_expand_prologue might.  */
  last_scratch_gr_reg = 15;

  memset (&current_frame_info, 0, sizeof (current_frame_info));
  current_frame_info.spill_cfa_off = -16;
  current_frame_info.n_input_regs = 1;
  current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Figure out whether "this" will be the first parameter (the
     typical case) or the second parameter (as happens when the
     virtual function returns certain class objects).  */
  this_parmno
    = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
       ? 1 : 0);
  this_regno = IN_REG (this_parmno);
  if (!TARGET_REG_NAMES)
    reg_names[this_regno] = ia64_reg_numbers[this_parmno];

  this_rtx = gen_rtx_REG (Pmode, this_regno);

  /* Apply the constant offset, if required.  */
  delta_rtx = GEN_INT (delta);
  if (TARGET_ILP32)
    {
      rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
      REG_POINTER (tmp) = 1;
      if (delta && satisfies_constraint_I (delta_rtx))
        {
          emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
          delta = 0;
        }
      else
        emit_insn (gen_ptr_extend (this_rtx, tmp));
    }
  if (delta)
    {
      if (!satisfies_constraint_I (delta_rtx))
        {
          rtx tmp = gen_rtx_REG (Pmode, 2);
          emit_move_insn (tmp, delta_rtx);
          delta_rtx = tmp;
        }
      emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 2);

      if (TARGET_ILP32)
        {
          rtx t = gen_rtx_REG (ptr_mode, 2);
          REG_POINTER (t) = 1;
          emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
          if (satisfies_constraint_I (vcall_offset_rtx))
            {
              emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
              vcall_offset = 0;
            }
          else
            emit_insn (gen_ptr_extend (tmp, t));
        }
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));

      if (vcall_offset)
        {
          if (!satisfies_constraint_J (vcall_offset_rtx))
            {
              rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
              emit_move_insn (tmp2, vcall_offset_rtx);
              vcall_offset_rtx = tmp2;
            }
          emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
        }

      if (TARGET_ILP32)
        emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}

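/* In C-like pseudocode (a sketch, not part of this file), the thunk
   emitted above behaves as:

       ret_type thunk (void *this, ...)
       {
         this = (char *) this + DELTA;
         if (VCALL_OFFSET)
           {
             char *vptr = *(char **) this;
             this = (char *) this + *(ptrdiff_t *) (vptr + VCALL_OFFSET);
           }
         return FUNCTION (this, ...);   // emitted as a sibling call
       }

   with "this" living in out0, or in out1 when the callee returns a
   class object through a hidden first parameter (see
   ia64_struct_retval_addr_is_first_parm_p above).  */
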
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
ia64_struct_value_rtx (tree fntype,
                       int incoming ATTRIBUTE_UNUSED)
{
  if (TARGET_ABI_OPEN_VMS ||
      (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}

/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */

static bool
ia64_scalar_mode_supported_p (scalar_mode mode)
{
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
    case E_SImode:
    case E_DImode:
    case E_TImode:
      return true;

    case E_SFmode:
    case E_DFmode:
    case E_XFmode:
    case E_RFmode:
      return true;

    case E_TFmode:
      return true;

    default:
      return false;
    }
}

/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */

static bool
ia64_vector_mode_supported_p (machine_mode mode)
{
  switch (mode)
    {
    case E_V8QImode:
    case E_V4HImode:
    case E_V2SImode:
      return true;

    case E_V2SFmode:
      return true;

    default:
      return false;
    }
}

/* Implement the FUNCTION_PROFILER macro.  */

void
ia64_output_function_profiler (FILE *file, int labelno)
{
  bool indirect_call;

  /* If the function needs a static chain and the static chain
     register is r15, we use an indirect call so as to bypass
     the PLT stub in case the executable is dynamically linked,
     because the stub clobbers r15 as per 5.3.6 of the psABI.
     We don't need to do that in non-canonical PIC mode.  */

  if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
    {
      gcc_assert (STATIC_CHAIN_REGNUM == 15);
      indirect_call = true;
    }
  else
    indirect_call = false;

  if (TARGET_GNU_AS)
    fputs ("\t.prologue 4, r40\n", file);
  else
    fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
  fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);

  if (NO_PROFILE_COUNTERS)
    fputs ("\tmov out3 = r0\n", file);
  else
    {
      char buf[20];
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);

      if (TARGET_AUTO_PIC)
        fputs ("\tmovl out3 = @gprel(", file);
      else
        fputs ("\taddl out3 = @ltoff(", file);
      assemble_name (file, buf);
      if (TARGET_AUTO_PIC)
        fputs (")\n", file);
      else
        fputs ("), r1\n", file);
    }

  if (indirect_call)
    fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
  fputs ("\t;;\n", file);

  fputs ("\t.save rp, r42\n", file);
  fputs ("\tmov out2 = b0\n", file);

  if (indirect_call)
    fputs ("\tld8 r14 = [r14]\n\t;;\n", file);

  fputs ("\t.body\n", file);
  fputs ("\tmov out1 = r1\n", file);

  if (indirect_call)
    {
      fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
      fputs ("\tmov b6 = r16\n", file);
      fputs ("\tld8 r1 = [r14]\n", file);
      fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
    }
  else
    fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
}

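/* For the common direct-call case (GNU as, NO_PROFILE_COUNTERS,
   no r15 static chain), the code above prints roughly:

       .prologue 4, r40
       alloc out0 = ar.pfs, 8, 0, 4, 0
       mov out3 = r0
       ;;
       .save rp, r42
       mov out2 = b0
       .body
       mov out1 = r1
       br.call.sptk.many b0 = _mcount
       ;;
   */
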
static GTY(()) rtx mcount_func_rtx;

static rtx
gen_mcount_func_rtx (void)
{
  if (!mcount_func_rtx)
    mcount_func_rtx = init_one_libfunc ("_mcount");
  return mcount_func_rtx;
}

void
ia64_profile_hook (int labelno)
{
  rtx label, ip;

  if (NO_PROFILE_COUNTERS)
    label = const0_rtx;
  else
    {
      char buf[30];
      const char *label_name;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
      label = gen_rtx_SYMBOL_REF (Pmode, label_name);
      SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
    }
  ip = gen_reg_rtx (Pmode);
  emit_insn (gen_ip_value (ip));
  emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
                     VOIDmode,
                     gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
                     ip, Pmode,
                     label, Pmode);
}

/* Return the mangling of TYPE if it is an extended fundamental type.  */

static const char *
ia64_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  /* On HP-UX, "long double" is mangled as "e" so __float128 is
     mangled as "e".  */
  if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
    return "g";
  /* On HP-UX, "e" is not available as a mangling of __float80 so use
     an extended mangling.  Elsewhere, "e" is available since long
     double is 80 bits.  */
  if (TYPE_MODE (type) == XFmode)
    return TARGET_HPUX ? "u9__float80" : "e";
  if (TYPE_MODE (type) == RFmode)
    return "u7__fpreg";
  return NULL;
}

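/* Example manglings implied by the returns above (illustrative): on
   Linux, "void f (__float80);" mangles as _Z1fe and __float128 as "g",
   while on HP-UX __float80 becomes _Z1fu9__float80; __fpreg mangles as
   u7__fpreg everywhere.  */
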
/* Return the diagnostic message string if conversion from FROMTYPE to
   TOTYPE is not allowed, NULL otherwise.  */

static const char *
ia64_invalid_conversion (const_tree fromtype, const_tree totype)
{
  /* Reject nontrivial conversion to or from __fpreg.  */
  if (TYPE_MODE (fromtype) == RFmode
      && TYPE_MODE (totype) != RFmode
      && TYPE_MODE (totype) != VOIDmode)
    return N_("invalid conversion from %<__fpreg%>");
  if (TYPE_MODE (totype) == RFmode
      && TYPE_MODE (fromtype) != RFmode)
    return N_("invalid conversion to %<__fpreg%>");
  return NULL;
}

/* Return the diagnostic message string if the unary operation OP is
   not permitted on TYPE, NULL otherwise.  */

static const char *
ia64_invalid_unary_op (int op, const_tree type)
{
  /* Reject operations on __fpreg other than unary + or &.  */
  if (TYPE_MODE (type) == RFmode
      && op != CONVERT_EXPR
      && op != ADDR_EXPR)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}

/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

static const char *
ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1,
                        const_tree type2)
{
  /* Reject operations on __fpreg.  */
  if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
    return N_("invalid operation on %<__fpreg%>");
  return NULL;
}

/* HP-UX version_id attribute.
   For object foo, if the version_id is set to 1234 put out an alias
   of '.alias foo "foo{1234}"'.  We can't use "foo{1234}" in anything
   other than an alias statement because it is an illegal symbol name.  */

static tree
ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
                                  tree name ATTRIBUTE_UNUSED,
                                  tree args,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  tree arg = TREE_VALUE (args);

  if (TREE_CODE (arg) != STRING_CST)
    {
      error ("version attribute is not a string");
      *no_add_attrs = true;
      return NULL_TREE;
    }
  return NULL_TREE;
}

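/* Illustrative usage of the attribute handled above (HP-UX only):

       extern int foo __attribute__((version_id ("1234")));

   which, per the comment before the handler, results in the alias
   directive '.alias foo "foo{1234}"' in the assembler output.  */
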
/* Target hook for c_mode_for_suffix.  */

static machine_mode
ia64_c_mode_for_suffix (char suffix)
{
  if (suffix == 'q')
    return TFmode;
  if (suffix == 'w')
    return XFmode;

  return VOIDmode;
}

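/* Consequently (illustrative), floating constants written with the 'q'
   suffix get TFmode and those with 'w' get XFmode:

       __float128 q = 1.0q;
       __float80  w = 1.0w;
   */
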
static GTY(()) rtx ia64_dconst_0_5_rtx;

rtx
ia64_dconst_0_5 (void)
{
  if (! ia64_dconst_0_5_rtx)
    {
      REAL_VALUE_TYPE rv;
      real_from_string (&rv, "0.5");
      ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
    }
  return ia64_dconst_0_5_rtx;
}

static GTY(()) rtx ia64_dconst_0_375_rtx;

rtx
ia64_dconst_0_375 (void)
{
  if (! ia64_dconst_0_375_rtx)
    {
      REAL_VALUE_TYPE rv;
      real_from_string (&rv, "0.375");
      ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
    }
  return ia64_dconst_0_375_rtx;
}

static fixed_size_mode
ia64_get_reg_raw_mode (int regno)
{
  if (FR_REGNO_P (regno))
    return XFmode;
  return default_get_reg_raw_mode (regno);
}

/* Implement TARGET_MEMBER_TYPE_FORCES_BLK.  ??? Might not be needed
   anymore.  */

static bool
ia64_member_type_forces_blk (const_tree, machine_mode mode)
{
  return TARGET_HPUX && mode == TFmode;
}

/* Always default to .text section until HP-UX linker is fixed.  */

ATTRIBUTE_UNUSED static section *
ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
                            enum node_frequency freq ATTRIBUTE_UNUSED,
                            bool startup ATTRIBUTE_UNUSED,
                            bool exit ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Construct (set target (vec_select op0 (parallel perm))) and
   return true if that's a valid instruction in the active ISA.  */

static bool
expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
{
  rtx rperm[MAX_VECT_LEN], x;
  unsigned i;

  for (i = 0; i < nelt; ++i)
    rperm[i] = GEN_INT (perm[i]);

  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
  x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
  x = gen_rtx_SET (target, x);

  rtx_insn *insn = emit_insn (x);
  if (recog_memoized (insn) < 0)
    {
      remove_insn (insn);
      return false;
    }
  return true;
}

/* Similar, but generate a vec_concat from op0 and op1 as well.  */

static bool
expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
                        const unsigned char *perm, unsigned nelt)
{
  machine_mode v2mode;
  rtx x;

  if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode))
    return false;
  x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
  return expand_vselect (target, x, perm, nelt);
}

/* Try to expand a no-op permutation.  */

static bool
expand_vec_perm_identity (struct expand_vec_perm_d *d)
{
  unsigned i, nelt = d->nelt;

  for (i = 0; i < nelt; ++i)
    if (d->perm[i] != i)
      return false;

  if (!d->testing_p)
    emit_move_insn (d->target, d->op0);

  return true;
}

/* Try to expand D via a shrp instruction.  */

static bool
expand_vec_perm_shrp (struct expand_vec_perm_d *d)
{
  unsigned i, nelt = d->nelt, shift, mask;
  rtx tmp, hi, lo;

  /* ??? Don't force V2SFmode into the integer registers.  */
  if (d->vmode == V2SFmode)
    return false;

  mask = (d->one_operand_p ? nelt - 1 : 2 * nelt - 1);

  shift = d->perm[0];
  if (BYTES_BIG_ENDIAN && shift > nelt)
    return false;

  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != ((shift + i) & mask))
      return false;

  if (d->testing_p)
    return true;

  hi = shift < nelt ? d->op1 : d->op0;
  lo = shift < nelt ? d->op0 : d->op1;

  shift %= nelt;

  shift *= GET_MODE_UNIT_SIZE (d->vmode) * BITS_PER_UNIT;

  /* We've eliminated the shift 0 case via expand_vec_perm_identity.  */
  gcc_assert (IN_RANGE (shift, 1, 63));

  /* Recall that big-endian elements are numbered starting at the top of
     the register.  Ideally we'd have a shift-left-pair.  But since we
     don't, convert to a shift the other direction.  */
  if (BYTES_BIG_ENDIAN)
    shift = 64 - shift;

  tmp = gen_reg_rtx (DImode);
  hi = gen_lowpart (DImode, hi);
  lo = gen_lowpart (DImode, lo);
  emit_insn (gen_shrp (tmp, hi, lo, GEN_INT (shift)));

  emit_move_insn (d->target, gen_lowpart (d->vmode, tmp));
  return true;
}

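/* Worked example (illustrative): for V4HImode operands A and B, the
   permutation {1, 2, 3, 4} selects elements 1..3 of A followed by
   element 0 of B.  It matches the check above with shift = 1 and
   mask = 7, so it is emitted as a single shrp on the A:B register
   pair with a 16-bit shift count (1 element * 16 bits).  */
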
/* Try to instantiate D in a single instruction.  */

static bool
expand_vec_perm_1 (struct expand_vec_perm_d *d)
{
  unsigned i, nelt = d->nelt;
  unsigned char perm2[MAX_VECT_LEN];

  /* Try single-operand selections.  */
  if (d->one_operand_p)
    {
      if (expand_vec_perm_identity (d))
        return true;
      if (expand_vselect (d->target, d->op0, d->perm, nelt))
        return true;
    }

  /* Try two operand selections.  */
  if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
    return true;

  /* Recognize interleave style patterns with reversed operands.  */
  if (!d->one_operand_p)
    {
      for (i = 0; i < nelt; ++i)
        {
          unsigned e = d->perm[i];
          if (e >= nelt)
            e -= nelt;
          else
            e += nelt;
          perm2[i] = e;
        }

      if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
        return true;
    }

  if (expand_vec_perm_shrp (d))
    return true;

  /* ??? Look for deposit-like permutations where most of the result
     comes from one vector unchanged and the rest comes from a
     sequential hunk of the other vector.  */

  return false;
}

/* Pattern match broadcast permutations.  */

static bool
expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
{
  unsigned i, elt, nelt = d->nelt;
  unsigned char perm2[2];
  rtx temp;
  bool ok;

  if (!d->one_operand_p)
    return false;

  elt = d->perm[0];
  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != elt)
      return false;

  switch (d->vmode)
    {
    case E_V2SImode:
    case E_V2SFmode:
      /* Implementable by interleave.  */
      perm2[0] = elt;
      perm2[1] = elt + 2;
      ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
      gcc_assert (ok);
      break;

    case E_V8QImode:
      /* Implementable by extract + broadcast.  */
      if (BYTES_BIG_ENDIAN)
        elt = 7 - elt;
      elt *= BITS_PER_UNIT;
      temp = gen_reg_rtx (DImode);
      emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
                            GEN_INT (8), GEN_INT (elt)));
      emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
      break;

    case E_V4HImode:
      /* Should have been matched directly by vec_select.  */
    default:
      gcc_unreachable ();
    }

  return true;
}

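/* Worked example (illustrative): a single-operand V8QImode permutation
   of the form {3,3,3,3,3,3,3,3} extracts byte 3 with extzv and then
   replicates it into all eight lanes via mux1's broadcast form.  */
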
/* A subroutine of ia64_expand_vec_perm_const_1.  Try to simplify a
   two vector permutation into a single vector permutation by using
   an interleave operation to merge the vectors.  */

static bool
expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
{
  struct expand_vec_perm_d dremap, dfinal;
  unsigned char remap[2 * MAX_VECT_LEN];
  unsigned contents, i, nelt, nelt2;
  unsigned h0, h1, h2, h3;
  rtx_insn *seq;
  bool ok;

  if (d->one_operand_p)
    return false;

  nelt = d->nelt;
  nelt2 = nelt / 2;

  /* Examine from whence the elements come.  */
  contents = 0;
  for (i = 0; i < nelt; ++i)
    contents |= 1u << d->perm[i];

  memset (remap, 0xff, sizeof (remap));
  dremap = *d;

  h0 = (1u << nelt2) - 1;
  h1 = h0 << nelt2;
  h2 = h0 << nelt;
  h3 = h0 << (nelt + nelt2);

  if ((contents & (h0 | h2)) == contents)       /* punpck even halves */
    {
      for (i = 0; i < nelt; ++i)
        {
          unsigned which = i / 2 + (i & 1 ? nelt : 0);
          remap[which] = i;
          dremap.perm[i] = which;
        }
    }
  else if ((contents & (h1 | h3)) == contents)  /* punpck odd halves */
    {
      for (i = 0; i < nelt; ++i)
        {
          unsigned which = i / 2 + nelt2 + (i & 1 ? nelt : 0);
          remap[which] = i;
          dremap.perm[i] = which;
        }
    }
  else if ((contents & 0x5555) == contents)     /* mix even elements */
    {
      for (i = 0; i < nelt; ++i)
        {
          unsigned which = (i & ~1) + (i & 1 ? nelt : 0);
          remap[which] = i;
          dremap.perm[i] = which;
        }
    }
  else if ((contents & 0xaaaa) == contents)     /* mix odd elements */
    {
      for (i = 0; i < nelt; ++i)
        {
          unsigned which = (i | 1) + (i & 1 ? nelt : 0);
          remap[which] = i;
          dremap.perm[i] = which;
        }
    }
  else if (floor_log2 (contents) - ctz_hwi (contents) < (int)nelt) /* shrp */
    {
      unsigned shift = ctz_hwi (contents);
      for (i = 0; i < nelt; ++i)
        {
          unsigned which = (i + shift) & (2 * nelt - 1);
          remap[which] = i;
          dremap.perm[i] = which;
        }
    }
  else
    return false;

  /* Use the remapping array set up above to move the elements from their
     swizzled locations into their final destinations.  */
  dfinal = *d;
  for (i = 0; i < nelt; ++i)
    {
      unsigned e = remap[d->perm[i]];
      gcc_assert (e < nelt);
      dfinal.perm[i] = e;
    }
  if (d->testing_p)
    dfinal.op0 = gen_raw_REG (dfinal.vmode, LAST_VIRTUAL_REGISTER + 1);
  else
    dfinal.op0 = gen_reg_rtx (dfinal.vmode);
  dfinal.op1 = dfinal.op0;
  dfinal.one_operand_p = true;
  dremap.target = dfinal.op0;

  /* Test if the final remap can be done with a single insn.  For V4HImode
     this *will* succeed.  For V8QImode or V2SImode it may not.  */
  start_sequence ();
  ok = expand_vec_perm_1 (&dfinal);
  seq = get_insns ();
  end_sequence ();
  if (!ok)
    return false;
  if (d->testing_p)
    return true;

  ok = expand_vec_perm_1 (&dremap);
  gcc_assert (ok);

  emit_insn (seq);
  return true;
}

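/* Worked example (illustrative): for V4HImode with the selected
   elements drawn only from even-numbered slots of the two inputs, say
   perm = {0, 4, 2, 6}, the "mix even elements" arm builds DREMAP as a
   single two-operand mix of the inputs, after which the remaining
   reordering (DFINAL) is a one-operand permutation that
   expand_vec_perm_1 can usually match directly.  */
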
/* A subroutine of ia64_expand_vec_perm_const_1.  Emit a full V4HImode
   constant permutation via two mux2 and a merge.  */

static bool
expand_vec_perm_v4hi_5 (struct expand_vec_perm_d *d)
{
  unsigned char perm2[4];
  rtx rmask[4];
  unsigned i;
  rtx t0, t1, mask, x;
  bool ok;

  if (d->vmode != V4HImode || d->one_operand_p)
    return false;
  if (d->testing_p)
    return true;

  for (i = 0; i < 4; ++i)
    {
      perm2[i] = d->perm[i] & 3;
      rmask[i] = (d->perm[i] & 4 ? const0_rtx : constm1_rtx);
    }
  mask = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmask));
  mask = force_reg (V4HImode, mask);

  t0 = gen_reg_rtx (V4HImode);
  t1 = gen_reg_rtx (V4HImode);

  ok = expand_vselect (t0, d->op0, perm2, 4);
  gcc_assert (ok);
  ok = expand_vselect (t1, d->op1, perm2, 4);
  gcc_assert (ok);

  x = gen_rtx_AND (V4HImode, mask, t0);
  emit_insn (gen_rtx_SET (t0, x));

  x = gen_rtx_NOT (V4HImode, mask);
  x = gen_rtx_AND (V4HImode, x, t1);
  emit_insn (gen_rtx_SET (t1, x));

  x = gen_rtx_IOR (V4HImode, t0, t1);
  emit_insn (gen_rtx_SET (d->target, x));

  return true;
}

/* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
   With all of the interface bits taken care of, perform the expansion
   in D and return true on success.  */

static bool
ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  if (expand_vec_perm_1 (d))
    return true;
  if (expand_vec_perm_broadcast (d))
    return true;
  if (expand_vec_perm_interleave_2 (d))
    return true;
  if (expand_vec_perm_v4hi_5 (d))
    return true;
  return false;
}

/* Implement TARGET_VECTORIZE_VEC_PERM_CONST.  */

static bool
ia64_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
                               rtx op1, const vec_perm_indices &sel)
{
  struct expand_vec_perm_d d;
  unsigned char perm[MAX_VECT_LEN];
  unsigned int i, nelt, which;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;

  d.vmode = vmode;
  gcc_assert (VECTOR_MODE_P (d.vmode));
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = !target;

  gcc_assert (sel.length () == nelt);
  gcc_checking_assert (sizeof (d.perm) == sizeof (perm));

  for (i = which = 0; i < nelt; ++i)
    {
      unsigned int ei = sel[i] & (2 * nelt - 1);

      which |= (ei < nelt ? 1 : 2);
      d.perm[i] = ei;
      perm[i] = ei;
    }

  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      if (d.testing_p || !rtx_equal_p (d.op0, d.op1))
        {
          d.one_operand_p = false;
          break;
        }

      /* The elements of PERM do not suggest that only the first operand
         is used, but both operands are identical.  Allow easier matching
         of the permutation by folding the permutation into the single
         input vector.  */
      for (i = 0; i < nelt; ++i)
        if (d.perm[i] >= nelt)
          d.perm[i] -= nelt;
      /* FALLTHRU */

    case 1:
      d.op1 = d.op0;
      d.one_operand_p = true;
      break;

    case 2:
      for (i = 0; i < nelt; ++i)
        d.perm[i] -= nelt;
      d.op0 = d.op1;
      d.one_operand_p = true;
      break;
    }

  if (d.testing_p)
    {
      /* We have to go through the motions and see if we can
         figure out how to generate the requested permutation.  */
      d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
      d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
      if (!d.one_operand_p)
        d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

      start_sequence ();
      bool ret = ia64_expand_vec_perm_const_1 (&d);
      end_sequence ();

      return ret;
    }

  if (ia64_expand_vec_perm_const_1 (&d))
    return true;

  /* If the mask says both arguments are needed, but they are the same,
     the above tried to expand with one_operand_p true.  If that didn't
     work, retry with one_operand_p false, as that's what we used in _ok.  */
  if (which == 3 && d.one_operand_p)
    {
      memcpy (d.perm, perm, sizeof (perm));
      d.one_operand_p = false;
      return ia64_expand_vec_perm_const_1 (&d);
    }

  return false;
}

/* Expand a vec_set operation on a V2SFmode vector, using fpack and a
   constant permutation.  */

void
ia64_expand_vec_setv2sf (rtx operands[3])
{
  struct expand_vec_perm_d d;
  unsigned int which;
  bool ok;

  d.target = operands[0];
  d.op0 = operands[0];
  d.op1 = gen_reg_rtx (V2SFmode);
  d.vmode = V2SFmode;
  d.nelt = 2;
  d.one_operand_p = false;
  d.testing_p = false;

  which = INTVAL (operands[2]);
  gcc_assert (which <= 1);
  d.perm[0] = 1 - which;
  d.perm[1] = which + 2;

  emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}

/* Expand an even/odd element extraction from the concatenation of OP0
   and OP1 into TARGET.  */

void
ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
{
  struct expand_vec_perm_d d;
  machine_mode vmode = GET_MODE (target);
  unsigned int i, nelt = GET_MODE_NUNITS (vmode);
  bool ok;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;
  d.vmode = vmode;
  d.nelt = nelt;
  d.one_operand_p = false;
  d.testing_p = false;

  for (i = 0; i < nelt; ++i)
    d.perm[i] = i * 2 + odd;

  ok = ia64_expand_vec_perm_const_1 (&d);
  gcc_assert (ok);
}

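/* Illustrative use: with V4HImode operands A and B, odd == 0 builds the
   permutation {0, 2, 4, 6} (the even elements of A:B) and odd == 1
   builds {1, 3, 5, 7}, the usual extract-even/extract-odd pair used
   when lowering vector narrowing operations.  */
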
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.

   In BR regs, we can't change the DImode at all.
   In FP regs, we can't change FP values to integer values and vice versa,
   but we can change e.g. DImode to SImode, and V2SFmode into DImode.  */

static bool
ia64_can_change_mode_class (machine_mode from, machine_mode to,
                            reg_class_t rclass)
{
  if (reg_classes_intersect_p (rclass, BR_REGS))
    return from == to;
  if (SCALAR_FLOAT_MODE_P (from) != SCALAR_FLOAT_MODE_P (to))
    return !reg_classes_intersect_p (rclass, FR_REGS);
  return true;
}

#include "gt-ia64.h"