1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "except.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "toplev.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
56 /* This is used for communication between ASM_OUTPUT_LABEL and
57 ASM_OUTPUT_LABELREF. */
58 int ia64_asm_output_label = 0;
60 /* Define the information needed to generate branch and scc insns. This is
61 stored from the compare operation. */
62 struct rtx_def * ia64_compare_op0;
63 struct rtx_def * ia64_compare_op1;
65 /* Register names for ia64_expand_prologue. */
66 static const char * const ia64_reg_numbers[96] =
67 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
68 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
69 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
70 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
71 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
72 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
73 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
74 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
75 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
76 "r104","r105","r106","r107","r108","r109","r110","r111",
77 "r112","r113","r114","r115","r116","r117","r118","r119",
78 "r120","r121","r122","r123","r124","r125","r126","r127"};
80 /* ??? These strings could be shared with REGISTER_NAMES. */
81 static const char * const ia64_input_reg_names[8] =
82 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
84 /* ??? These strings could be shared with REGISTER_NAMES. */
85 static const char * const ia64_local_reg_names[80] =
86 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
87 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
88 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
89 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
90 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
91 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
92 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
93 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
94 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
95 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
97 /* ??? These strings could be shared with REGISTER_NAMES. */
98 static const char * const ia64_output_reg_names[8] =
99 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
101 /* String used with the -mfixed-range= option. */
102 const char *ia64_fixed_range_string;
104 /* Determines whether we use adds, addl, or movl to generate our
105 TLS immediate offsets. */
106 int ia64_tls_size = 22;
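/* Illustrative note (not in the original source): the accepted -mtls-size
   values are 14, 22, and 64; 14-bit offsets fit the "adds" immediate,
   22-bit offsets fit "addl", and 64-bit offsets require "movl".  */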
108 /* String used with the -mtls-size= option. */
109 const char *ia64_tls_size_string;
 111 /* Which cpu we are scheduling for. */
112 enum processor_type ia64_tune;
 114 /* String used with the -mtune= option. */
115 const char *ia64_tune_string;
117 /* Determines whether we run our final scheduling pass or not. We always
118 avoid the normal second scheduling pass. */
119 static int ia64_flag_schedule_insns2;
121 /* Determines whether we run variable tracking in machine dependent
122 reorganization. */
123 static int ia64_flag_var_tracking;
125 /* Variables which are this size or smaller are put in the sdata/sbss
126 sections. */
128 unsigned int ia64_section_threshold;
130 /* The following variable is used by the DFA insn scheduler. The value is
131 TRUE if we do insn bundling instead of insn scheduling. */
132 int bundling_p = 0;
134 /* Structure to be filled in by ia64_compute_frame_size with register
135 save masks and offsets for the current function. */
137 struct ia64_frame_info
139 HOST_WIDE_INT total_size; /* size of the stack frame, not including
140 the caller's scratch area. */
141 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
142 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
143 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
144 HARD_REG_SET mask; /* mask of saved registers. */
145 unsigned int gr_used_mask; /* mask of registers in use as gr spill
146 registers or long-term scratches. */
147 int n_spilled; /* number of spilled registers. */
148 int reg_fp; /* register for fp. */
149 int reg_save_b0; /* save register for b0. */
150 int reg_save_pr; /* save register for prs. */
151 int reg_save_ar_pfs; /* save register for ar.pfs. */
152 int reg_save_ar_unat; /* save register for ar.unat. */
153 int reg_save_ar_lc; /* save register for ar.lc. */
154 int reg_save_gp; /* save register for gp. */
155 int n_input_regs; /* number of input registers used. */
156 int n_local_regs; /* number of local registers used. */
157 int n_output_regs; /* number of output registers used. */
158 int n_rotate_regs; /* number of rotating registers used. */
160 char need_regstk; /* true if a .regstk directive needed. */
161 char initialized; /* true if the data is finalized. */
164 /* Current frame information calculated by ia64_compute_frame_size. */
165 static struct ia64_frame_info current_frame_info;
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
173 static rtx gen_tls_get_addr (void);
174 static rtx gen_thread_pointer (void);
175 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
176 static int find_gr_spill (int);
177 static int next_scratch_gr_reg (void);
178 static void mark_reg_gr_used_mask (rtx, void *);
179 static void ia64_compute_frame_size (HOST_WIDE_INT);
180 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
181 static void finish_spill_pointers (void);
182 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
183 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
184 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
185 static rtx gen_movdi_x (rtx, rtx, rtx);
186 static rtx gen_fr_spill_x (rtx, rtx, rtx);
187 static rtx gen_fr_restore_x (rtx, rtx, rtx);
189 static enum machine_mode hfa_element_mode (tree, int);
190 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
191 tree, int *, int);
192 static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
193 tree, bool);
194 static bool ia64_function_ok_for_sibcall (tree, tree);
195 static bool ia64_return_in_memory (tree, tree);
196 static bool ia64_rtx_costs (rtx, int, int, int *);
197 static void fix_range (const char *);
198 static struct machine_function * ia64_init_machine_status (void);
199 static void emit_insn_group_barriers (FILE *);
200 static void emit_all_insn_group_barriers (FILE *);
201 static void final_emit_insn_group_barriers (FILE *);
202 static void emit_predicate_relation_info (void);
203 static void ia64_reorg (void);
204 static bool ia64_in_small_data_p (tree);
205 static void process_epilogue (void);
206 static int process_set (FILE *, rtx);
208 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
209 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
210 static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
211 int, tree, rtx);
212 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
213 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
214 static bool ia64_assemble_integer (rtx, unsigned int, int);
215 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
216 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
217 static void ia64_output_function_end_prologue (FILE *);
219 static int ia64_issue_rate (void);
220 static int ia64_adjust_cost (rtx, rtx, rtx, int);
221 static void ia64_sched_init (FILE *, int, int);
222 static void ia64_sched_finish (FILE *, int);
223 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
224 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
225 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
226 static int ia64_variable_issue (FILE *, int, rtx, int);
228 static struct bundle_state *get_free_bundle_state (void);
229 static void free_bundle_state (struct bundle_state *);
230 static void initiate_bundle_states (void);
231 static void finish_bundle_states (void);
232 static unsigned bundle_state_hash (const void *);
233 static int bundle_state_eq_p (const void *, const void *);
234 static int insert_bundle_state (struct bundle_state *);
235 static void initiate_bundle_state_table (void);
236 static void finish_bundle_state_table (void);
237 static int try_issue_nops (struct bundle_state *, int);
238 static int try_issue_insn (struct bundle_state *, rtx);
239 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
240 static int get_max_pos (state_t);
241 static int get_template (state_t, int);
243 static rtx get_next_important_insn (rtx, rtx);
244 static void bundling (FILE *, int, rtx, rtx);
246 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
247 HOST_WIDE_INT, tree);
248 static void ia64_file_start (void);
250 static void ia64_select_rtx_section (enum machine_mode, rtx,
251 unsigned HOST_WIDE_INT);
252 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
253 ATTRIBUTE_UNUSED;
254 static void ia64_rwreloc_unique_section (tree, int)
255 ATTRIBUTE_UNUSED;
256 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
257 unsigned HOST_WIDE_INT)
258 ATTRIBUTE_UNUSED;
259 static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
260 ATTRIBUTE_UNUSED;
262 static void ia64_hpux_add_extern_decl (tree decl)
263 ATTRIBUTE_UNUSED;
264 static void ia64_hpux_file_end (void)
265 ATTRIBUTE_UNUSED;
266 static void ia64_init_libfuncs (void)
267 ATTRIBUTE_UNUSED;
268 static void ia64_hpux_init_libfuncs (void)
269 ATTRIBUTE_UNUSED;
270 static void ia64_sysv4_init_libfuncs (void)
271 ATTRIBUTE_UNUSED;
272 static void ia64_vms_init_libfuncs (void)
273 ATTRIBUTE_UNUSED;
275 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
276 static void ia64_encode_section_info (tree, rtx, int);
277 static rtx ia64_struct_value_rtx (tree, int);
278 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
279 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
282 /* Table of valid machine attributes. */
283 static const struct attribute_spec ia64_attribute_table[] =
285 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
286 { "syscall_linkage", 0, 0, false, true, true, NULL },
287 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
288 { NULL, 0, 0, false, false, false, NULL }
291 /* Initialize the GCC target structure. */
292 #undef TARGET_ATTRIBUTE_TABLE
293 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
295 #undef TARGET_INIT_BUILTINS
296 #define TARGET_INIT_BUILTINS ia64_init_builtins
298 #undef TARGET_EXPAND_BUILTIN
299 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
301 #undef TARGET_ASM_BYTE_OP
302 #define TARGET_ASM_BYTE_OP "\tdata1\t"
303 #undef TARGET_ASM_ALIGNED_HI_OP
304 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
305 #undef TARGET_ASM_ALIGNED_SI_OP
306 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
307 #undef TARGET_ASM_ALIGNED_DI_OP
308 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
309 #undef TARGET_ASM_UNALIGNED_HI_OP
310 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
311 #undef TARGET_ASM_UNALIGNED_SI_OP
312 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
313 #undef TARGET_ASM_UNALIGNED_DI_OP
314 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
315 #undef TARGET_ASM_INTEGER
316 #define TARGET_ASM_INTEGER ia64_assemble_integer
318 #undef TARGET_ASM_FUNCTION_PROLOGUE
319 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
320 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
321 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
322 #undef TARGET_ASM_FUNCTION_EPILOGUE
323 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
325 #undef TARGET_IN_SMALL_DATA_P
326 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
328 #undef TARGET_SCHED_ADJUST_COST
329 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
330 #undef TARGET_SCHED_ISSUE_RATE
331 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
332 #undef TARGET_SCHED_VARIABLE_ISSUE
333 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
334 #undef TARGET_SCHED_INIT
335 #define TARGET_SCHED_INIT ia64_sched_init
336 #undef TARGET_SCHED_FINISH
337 #define TARGET_SCHED_FINISH ia64_sched_finish
338 #undef TARGET_SCHED_REORDER
339 #define TARGET_SCHED_REORDER ia64_sched_reorder
340 #undef TARGET_SCHED_REORDER2
341 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
343 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
344 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
346 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
347 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
349 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
350 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
351 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
352 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
354 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
355 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
356 ia64_first_cycle_multipass_dfa_lookahead_guard
358 #undef TARGET_SCHED_DFA_NEW_CYCLE
359 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
361 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
362 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
363 #undef TARGET_PASS_BY_REFERENCE
364 #define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference
366 #undef TARGET_ASM_OUTPUT_MI_THUNK
367 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
368 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
369 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
371 #undef TARGET_ASM_FILE_START
372 #define TARGET_ASM_FILE_START ia64_file_start
374 #undef TARGET_RTX_COSTS
375 #define TARGET_RTX_COSTS ia64_rtx_costs
376 #undef TARGET_ADDRESS_COST
377 #define TARGET_ADDRESS_COST hook_int_rtx_0
379 #undef TARGET_MACHINE_DEPENDENT_REORG
380 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
382 #undef TARGET_ENCODE_SECTION_INFO
383 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
385 /* ??? ABI doesn't allow us to define this. */
386 #if 0
387 #undef TARGET_PROMOTE_FUNCTION_ARGS
388 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
389 #endif
391 /* ??? ABI doesn't allow us to define this. */
392 #if 0
393 #undef TARGET_PROMOTE_FUNCTION_RETURN
394 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
395 #endif
397 /* ??? Investigate. */
398 #if 0
399 #undef TARGET_PROMOTE_PROTOTYPES
400 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
401 #endif
403 #undef TARGET_STRUCT_VALUE_RTX
404 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
405 #undef TARGET_RETURN_IN_MEMORY
406 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
407 #undef TARGET_SETUP_INCOMING_VARARGS
408 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
409 #undef TARGET_STRICT_ARGUMENT_NAMING
410 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
411 #undef TARGET_MUST_PASS_IN_STACK
412 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
414 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
415 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
417 #undef TARGET_UNWIND_EMIT
418 #define TARGET_UNWIND_EMIT process_for_unwind_directive
420 #undef TARGET_SCALAR_MODE_SUPPORTED_P
421 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
423 struct gcc_target targetm = TARGET_INITIALIZER;
425 typedef enum
427 ADDR_AREA_NORMAL, /* normal address area */
428 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
430 ia64_addr_area;
432 static GTY(()) tree small_ident1;
433 static GTY(()) tree small_ident2;
435 static void
436 init_idents (void)
438 if (small_ident1 == 0)
440 small_ident1 = get_identifier ("small");
441 small_ident2 = get_identifier ("__small__");
445 /* Retrieve the address area that has been chosen for the given decl. */
447 static ia64_addr_area
448 ia64_get_addr_area (tree decl)
450 tree model_attr;
452 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
453 if (model_attr)
455 tree id;
457 init_idents ();
458 id = TREE_VALUE (TREE_VALUE (model_attr));
459 if (id == small_ident1 || id == small_ident2)
460 return ADDR_AREA_SMALL;
462 return ADDR_AREA_NORMAL;
465 static tree
466 ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
468 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
469 ia64_addr_area area;
470 tree arg, decl = *node;
472 init_idents ();
473 arg = TREE_VALUE (args);
474 if (arg == small_ident1 || arg == small_ident2)
476 addr_area = ADDR_AREA_SMALL;
478 else
480 warning ("invalid argument of `%s' attribute",
481 IDENTIFIER_POINTER (name));
482 *no_add_attrs = true;
485 switch (TREE_CODE (decl))
487 case VAR_DECL:
488 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
489 == FUNCTION_DECL)
490 && !TREE_STATIC (decl))
492 error ("%Jan address area attribute cannot be specified for "
493 "local variables", decl, decl);
494 *no_add_attrs = true;
496 area = ia64_get_addr_area (decl);
497 if (area != ADDR_AREA_NORMAL && addr_area != area)
499 error ("%Jaddress area of '%s' conflicts with previous "
500 "declaration", decl, decl);
501 *no_add_attrs = true;
503 break;
505 case FUNCTION_DECL:
506 error ("%Jaddress area attribute cannot be specified for functions",
507 decl, decl);
508 *no_add_attrs = true;
509 break;
511 default:
512 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
513 *no_add_attrs = true;
514 break;
517 return NULL_TREE;
520 static void
521 ia64_encode_addr_area (tree decl, rtx symbol)
523 int flags;
525 flags = SYMBOL_REF_FLAGS (symbol);
526 switch (ia64_get_addr_area (decl))
528 case ADDR_AREA_NORMAL: break;
529 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
530 default: abort ();
532 SYMBOL_REF_FLAGS (symbol) = flags;
535 static void
536 ia64_encode_section_info (tree decl, rtx rtl, int first)
538 default_encode_section_info (decl, rtl, first);
540 /* Careful not to prod global register variables. */
541 if (TREE_CODE (decl) == VAR_DECL
542 && GET_CODE (DECL_RTL (decl)) == MEM
543 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
544 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
545 ia64_encode_addr_area (decl, XEXP (rtl, 0));
548 /* Return 1 if the operands of a move are ok. */
551 ia64_move_ok (rtx dst, rtx src)
553 /* If we're under init_recog_no_volatile, we'll not be able to use
554 memory_operand. So check the code directly and don't worry about
555 the validity of the underlying address, which should have been
556 checked elsewhere anyway. */
557 if (GET_CODE (dst) != MEM)
558 return 1;
559 if (GET_CODE (src) == MEM)
560 return 0;
561 if (register_operand (src, VOIDmode))
562 return 1;
 564 /* Otherwise, this must be a constant: either 0, 0.0, or 1.0. */
565 if (INTEGRAL_MODE_P (GET_MODE (dst)))
566 return src == const0_rtx;
567 else
568 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
572 addp4_optimize_ok (rtx op1, rtx op2)
574 return (basereg_operand (op1, GET_MODE(op1)) !=
575 basereg_operand (op2, GET_MODE(op2)));
578 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
579 Return the length of the field, or <= 0 on failure. */
582 ia64_depz_field_mask (rtx rop, rtx rshift)
584 unsigned HOST_WIDE_INT op = INTVAL (rop);
585 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
587 /* Get rid of the zero bits we're shifting in. */
588 op >>= shift;
590 /* We must now have a solid block of 1's at bit 0. */
591 return exact_log2 (op + 1);
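/* Illustrative example (not in the original source): for ROP == 0xff00 and
   SHIFT == 8, OP >> SHIFT is 0xff and exact_log2 (0x100) yields a field
   length of 8; for ROP == 0xf0f0 the shifted value plus one is not a power
   of two, so exact_log2 returns -1 and the mask is rejected.  */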
594 /* Expand a symbolic constant load. */
596 void
597 ia64_expand_load_address (rtx dest, rtx src)
599 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (src))
600 abort ();
601 if (GET_CODE (dest) != REG)
602 abort ();
604 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
605 having to pointer-extend the value afterward. Other forms of address
606 computation below are also more natural to compute as 64-bit quantities.
607 If we've been given an SImode destination register, change it. */
608 if (GET_MODE (dest) != Pmode)
609 dest = gen_rtx_REG (Pmode, REGNO (dest));
611 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
613 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
614 return;
616 else if (TARGET_AUTO_PIC)
618 emit_insn (gen_load_gprel64 (dest, src));
619 return;
621 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
623 emit_insn (gen_load_fptr (dest, src));
624 return;
626 else if (sdata_symbolic_operand (src, VOIDmode))
628 emit_insn (gen_load_gprel (dest, src));
629 return;
632 if (GET_CODE (src) == CONST
633 && GET_CODE (XEXP (src, 0)) == PLUS
634 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
635 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x3fff) != 0)
637 rtx sym = XEXP (XEXP (src, 0), 0);
638 HOST_WIDE_INT ofs, hi, lo;
640 /* Split the offset into a sign extended 14-bit low part
641 and a complementary high part. */
642 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
643 lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
644 hi = ofs - lo;
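/* Illustrative example (not in the original source): for ofs == 0x6789,
   lo becomes -0x1877 (the low 14 bits sign-extended) and hi becomes
   0x8000, so hi has its low 14 bits clear and hi + lo == ofs.  */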
646 ia64_expand_load_address (dest, plus_constant (sym, hi));
647 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
649 else
651 rtx tmp;
653 tmp = gen_rtx_HIGH (Pmode, src);
654 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
655 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
657 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
658 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
662 static GTY(()) rtx gen_tls_tga;
663 static rtx
664 gen_tls_get_addr (void)
666 if (!gen_tls_tga)
667 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
668 return gen_tls_tga;
671 static GTY(()) rtx thread_pointer_rtx;
672 static rtx
673 gen_thread_pointer (void)
675 if (!thread_pointer_rtx)
676 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
677 return thread_pointer_rtx;
680 static rtx
681 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
683 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
684 rtx orig_op0 = op0;
686 switch (tls_kind)
688 case TLS_MODEL_GLOBAL_DYNAMIC:
689 start_sequence ();
691 tga_op1 = gen_reg_rtx (Pmode);
692 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
693 tga_op1 = gen_const_mem (Pmode, tga_op1);
695 tga_op2 = gen_reg_rtx (Pmode);
696 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
697 tga_op2 = gen_const_mem (Pmode, tga_op2);
699 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
700 LCT_CONST, Pmode, 2, tga_op1,
701 Pmode, tga_op2, Pmode);
703 insns = get_insns ();
704 end_sequence ();
706 if (GET_MODE (op0) != Pmode)
707 op0 = tga_ret;
708 emit_libcall_block (insns, op0, tga_ret, op1);
709 break;
711 case TLS_MODEL_LOCAL_DYNAMIC:
 712 /* ??? This isn't the completely proper way to do local-dynamic.
713 If the call to __tls_get_addr is used only by a single symbol,
714 then we should (somehow) move the dtprel to the second arg
715 to avoid the extra add. */
716 start_sequence ();
718 tga_op1 = gen_reg_rtx (Pmode);
719 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
720 tga_op1 = gen_const_mem (Pmode, tga_op1);
722 tga_op2 = const0_rtx;
724 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
725 LCT_CONST, Pmode, 2, tga_op1,
726 Pmode, tga_op2, Pmode);
728 insns = get_insns ();
729 end_sequence ();
731 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
732 UNSPEC_LD_BASE);
733 tmp = gen_reg_rtx (Pmode);
734 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
736 if (!register_operand (op0, Pmode))
737 op0 = gen_reg_rtx (Pmode);
738 if (TARGET_TLS64)
740 emit_insn (gen_load_dtprel (op0, op1));
741 emit_insn (gen_adddi3 (op0, tmp, op0));
743 else
744 emit_insn (gen_add_dtprel (op0, tmp, op1));
745 break;
747 case TLS_MODEL_INITIAL_EXEC:
748 tmp = gen_reg_rtx (Pmode);
749 emit_insn (gen_load_ltoff_tprel (tmp, op1));
750 tmp = gen_const_mem (Pmode, tmp);
751 tmp = force_reg (Pmode, tmp);
753 if (!register_operand (op0, Pmode))
754 op0 = gen_reg_rtx (Pmode);
755 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
756 break;
758 case TLS_MODEL_LOCAL_EXEC:
759 if (!register_operand (op0, Pmode))
760 op0 = gen_reg_rtx (Pmode);
761 if (TARGET_TLS64)
763 emit_insn (gen_load_tprel (op0, op1));
764 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
766 else
767 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
768 break;
770 default:
771 abort ();
774 if (orig_op0 == op0)
775 return NULL_RTX;
776 if (GET_MODE (orig_op0) == Pmode)
777 return op0;
778 return gen_lowpart (GET_MODE (orig_op0), op0);
782 ia64_expand_move (rtx op0, rtx op1)
784 enum machine_mode mode = GET_MODE (op0);
786 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
787 op1 = force_reg (mode, op1);
789 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
791 enum tls_model tls_kind;
792 if (GET_CODE (op1) == SYMBOL_REF
793 && (tls_kind = SYMBOL_REF_TLS_MODEL (op1)))
794 return ia64_expand_tls_address (tls_kind, op0, op1);
796 if (!TARGET_NO_PIC && reload_completed)
798 ia64_expand_load_address (op0, op1);
799 return NULL_RTX;
803 return op1;
806 /* Split a move from OP1 to OP0 conditional on COND. */
808 void
809 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
811 rtx insn, first = get_last_insn ();
813 emit_move_insn (op0, op1);
815 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
816 if (INSN_P (insn))
817 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
818 PATTERN (insn));
821 /* Split a post-reload TImode or TFmode reference into two DImode
822 components. This is made extra difficult by the fact that we do
823 not get any scratch registers to work with, because reload cannot
824 be prevented from giving us a scratch that overlaps the register
825 pair involved. So instead, when addressing memory, we tweak the
826 pointer register up and back down with POST_INCs. Or up and not
827 back down when we can get away with it.
829 REVERSED is true when the loads must be done in reversed order
830 (high word first) for correctness. DEAD is true when the pointer
831 dies with the second insn we generate and therefore the second
832 address must not carry a postmodify.
834 May return an insn which is to be emitted after the moves. */
836 static rtx
837 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
839 rtx fixup = 0;
841 switch (GET_CODE (in))
843 case REG:
844 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
845 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
846 break;
848 case CONST_INT:
849 case CONST_DOUBLE:
850 /* Cannot occur reversed. */
851 if (reversed) abort ();
853 if (GET_MODE (in) != TFmode)
854 split_double (in, &out[0], &out[1]);
855 else
856 /* split_double does not understand how to split a TFmode
857 quantity into a pair of DImode constants. */
859 REAL_VALUE_TYPE r;
860 unsigned HOST_WIDE_INT p[2];
861 long l[4]; /* TFmode is 128 bits */
863 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
864 real_to_target (l, &r, TFmode);
866 if (FLOAT_WORDS_BIG_ENDIAN)
868 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
869 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
871 else
873 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
874 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
876 out[0] = GEN_INT (p[0]);
877 out[1] = GEN_INT (p[1]);
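/* Descriptive note (not in the original source): real_to_target fills
   l[0..3] with the four 32-bit words of the 128-bit TFmode image, and the
   code above glues them into two 64-bit halves, picking which pair forms
   the low DImode word according to FLOAT_WORDS_BIG_ENDIAN.  */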
879 break;
881 case MEM:
883 rtx base = XEXP (in, 0);
884 rtx offset;
886 switch (GET_CODE (base))
888 case REG:
889 if (!reversed)
891 out[0] = adjust_automodify_address
892 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
893 out[1] = adjust_automodify_address
894 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
896 else
898 /* Reversal requires a pre-increment, which can only
899 be done as a separate insn. */
900 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
901 out[0] = adjust_automodify_address
902 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
903 out[1] = adjust_address (in, DImode, 0);
905 break;
907 case POST_INC:
908 if (reversed || dead) abort ();
909 /* Just do the increment in two steps. */
910 out[0] = adjust_automodify_address (in, DImode, 0, 0);
911 out[1] = adjust_automodify_address (in, DImode, 0, 8);
912 break;
914 case POST_DEC:
915 if (reversed || dead) abort ();
916 /* Add 8, subtract 24. */
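/* Descriptive note (not in the original source): the original address was
   a POST_DEC of a 16-byte quantity, i.e. a net change of -16 to BASE.
   The POST_INC below adds 8 and the POST_MODIFY of -24 on the second
   access brings the total back to -16, preserving the final value.  */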
917 base = XEXP (base, 0);
918 out[0] = adjust_automodify_address
919 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
920 out[1] = adjust_automodify_address
921 (in, DImode,
922 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
924 break;
926 case POST_MODIFY:
927 if (reversed || dead) abort ();
928 /* Extract and adjust the modification. This case is
929 trickier than the others, because we might have an
930 index register, or we might have a combined offset that
931 doesn't fit a signed 9-bit displacement field. We can
932 assume the incoming expression is already legitimate. */
933 offset = XEXP (base, 1);
934 base = XEXP (base, 0);
936 out[0] = adjust_automodify_address
937 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
939 if (GET_CODE (XEXP (offset, 1)) == REG)
941 /* Can't adjust the postmodify to match. Emit the
942 original, then a separate addition insn. */
943 out[1] = adjust_automodify_address (in, DImode, 0, 8);
944 fixup = gen_adddi3 (base, base, GEN_INT (-8));
946 else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
947 abort ();
948 else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
950 /* Again the postmodify cannot be made to match, but
951 in this case it's more efficient to get rid of the
952 postmodify entirely and fix up with an add insn. */
953 out[1] = adjust_automodify_address (in, DImode, base, 8);
954 fixup = gen_adddi3 (base, base,
955 GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
957 else
959 /* Combined offset still fits in the displacement field.
960 (We cannot overflow it at the high end.) */
961 out[1] = adjust_automodify_address
962 (in, DImode,
963 gen_rtx_POST_MODIFY (Pmode, base,
964 gen_rtx_PLUS (Pmode, base,
965 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
968 break;
970 default:
971 abort ();
973 break;
976 default:
977 abort ();
980 return fixup;
983 /* Split a TImode or TFmode move instruction after reload.
984 This is used by *movtf_internal and *movti_internal. */
985 void
986 ia64_split_tmode_move (rtx operands[])
988 rtx in[2], out[2], insn;
989 rtx fixup[2];
990 bool dead = false;
991 bool reversed = false;
993 /* It is possible for reload to decide to overwrite a pointer with
994 the value it points to. In that case we have to do the loads in
995 the appropriate order so that the pointer is not destroyed too
996 early. Also we must not generate a postmodify for that second
997 load, or rws_access_regno will abort. */
998 if (GET_CODE (operands[1]) == MEM
999 && reg_overlap_mentioned_p (operands[0], operands[1]))
1001 rtx base = XEXP (operands[1], 0);
1002 while (GET_CODE (base) != REG)
1003 base = XEXP (base, 0);
1005 if (REGNO (base) == REGNO (operands[0]))
1006 reversed = true;
1007 dead = true;
1009 /* Another reason to do the moves in reversed order is if the first
1010 element of the target register pair is also the second element of
1011 the source register pair. */
1012 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1013 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1014 reversed = true;
1016 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1017 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1019 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1020 if (GET_CODE (EXP) == MEM \
1021 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1022 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1023 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1024 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1025 XEXP (XEXP (EXP, 0), 0), \
1026 REG_NOTES (INSN))
1028 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1029 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1030 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1032 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1033 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1034 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1036 if (fixup[0])
1037 emit_insn (fixup[0]);
1038 if (fixup[1])
1039 emit_insn (fixup[1]);
1041 #undef MAYBE_ADD_REG_INC_NOTE
1044 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1045 through memory plus an extra GR scratch register. Except that you can
1046 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1047 SECONDARY_RELOAD_CLASS, but not both.
1049 We got into problems in the first place by allowing a construct like
1050 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1051 This solution attempts to prevent this situation from occurring. When
1052 we see something like the above, we spill the inner register to memory. */
1055 spill_xfmode_operand (rtx in, int force)
1057 if (GET_CODE (in) == SUBREG
1058 && GET_MODE (SUBREG_REG (in)) == TImode
1059 && GET_CODE (SUBREG_REG (in)) == REG)
1061 rtx memt = assign_stack_temp (TImode, 16, 0);
1062 emit_move_insn (memt, SUBREG_REG (in));
1063 return adjust_address (memt, XFmode, 0);
1065 else if (force && GET_CODE (in) == REG)
1067 rtx memx = assign_stack_temp (XFmode, 16, 0);
1068 emit_move_insn (memx, in);
1069 return memx;
1071 else
1072 return in;
1075 /* Emit comparison instruction if necessary, returning the expression
1076 that holds the compare result in the proper mode. */
1078 static GTY(()) rtx cmptf_libfunc;
1081 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1083 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1084 rtx cmp;
1086 /* If we have a BImode input, then we already have a compare result, and
1087 do not need to emit another comparison. */
1088 if (GET_MODE (op0) == BImode)
1090 if ((code == NE || code == EQ) && op1 == const0_rtx)
1091 cmp = op0;
1092 else
1093 abort ();
1095 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1096 magic number as its third argument, which indicates what to do.
1097 The return value is an integer to be compared against zero. */
1098 else if (GET_MODE (op0) == TFmode)
1100 enum qfcmp_magic {
1101 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1102 QCMP_UNORD = 2,
1103 QCMP_EQ = 4,
1104 QCMP_LT = 8,
1105 QCMP_GT = 16
1106 } magic;
1107 enum rtx_code ncode;
1108 rtx ret, insns;
1109 if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
1110 abort ();
1111 switch (code)
1113 /* 1 = equal, 0 = not equal. Equality operators do
1114 not raise FP_INVALID when given an SNaN operand. */
1115 case EQ: magic = QCMP_EQ; ncode = NE; break;
1116 case NE: magic = QCMP_EQ; ncode = EQ; break;
1117 /* isunordered() from C99. */
1118 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1119 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1120 /* Relational operators raise FP_INVALID when given
1121 an SNaN operand. */
1122 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1123 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1124 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1125 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1126 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1127 Expanders for buneq etc. would have to be added to ia64.md
1128 for this to be useful. */
1129 default: abort ();
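/* Illustrative example (not in the original source): for code == LE the
   magic value is QCMP_LT | QCMP_EQ | QCMP_INV == 13; _U_Qfcmp then returns
   nonzero when op0 <= op1 (raising FP_INVALID on an SNaN operand), and the
   sequence below tests that result against zero with NE.  */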
1132 start_sequence ();
1134 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1135 op0, TFmode, op1, TFmode,
1136 GEN_INT (magic), DImode);
1137 cmp = gen_reg_rtx (BImode);
1138 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1139 gen_rtx_fmt_ee (ncode, BImode,
1140 ret, const0_rtx)));
1142 insns = get_insns ();
1143 end_sequence ();
1145 emit_libcall_block (insns, cmp, cmp,
1146 gen_rtx_fmt_ee (code, BImode, op0, op1));
1147 code = NE;
1149 else
1151 cmp = gen_reg_rtx (BImode);
1152 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1153 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1154 code = NE;
1157 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1160 /* Emit the appropriate sequence for a call. */
1162 void
1163 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1164 int sibcall_p)
1166 rtx insn, b0;
1168 addr = XEXP (addr, 0);
1169 addr = convert_memory_address (DImode, addr);
1170 b0 = gen_rtx_REG (DImode, R_BR (0));
1172 /* ??? Should do this for functions known to bind local too. */
1173 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1175 if (sibcall_p)
1176 insn = gen_sibcall_nogp (addr);
1177 else if (! retval)
1178 insn = gen_call_nogp (addr, b0);
1179 else
1180 insn = gen_call_value_nogp (retval, addr, b0);
1181 insn = emit_call_insn (insn);
1183 else
1185 if (sibcall_p)
1186 insn = gen_sibcall_gp (addr);
1187 else if (! retval)
1188 insn = gen_call_gp (addr, b0);
1189 else
1190 insn = gen_call_value_gp (retval, addr, b0);
1191 insn = emit_call_insn (insn);
1193 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1196 if (sibcall_p)
1197 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1200 void
1201 ia64_reload_gp (void)
1203 rtx tmp;
1205 if (current_frame_info.reg_save_gp)
1206 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1207 else
1209 HOST_WIDE_INT offset;
1211 offset = (current_frame_info.spill_cfa_off
1212 + current_frame_info.spill_size);
1213 if (frame_pointer_needed)
1215 tmp = hard_frame_pointer_rtx;
1216 offset = -offset;
1218 else
1220 tmp = stack_pointer_rtx;
1221 offset = current_frame_info.total_size - offset;
1224 if (CONST_OK_FOR_I (offset))
1225 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1226 tmp, GEN_INT (offset)));
1227 else
1229 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1230 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1231 pic_offset_table_rtx, tmp));
1234 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1237 emit_move_insn (pic_offset_table_rtx, tmp);
1240 void
1241 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1242 rtx scratch_b, int noreturn_p, int sibcall_p)
1244 rtx insn;
1245 bool is_desc = false;
1247 /* If we find we're calling through a register, then we're actually
1248 calling through a descriptor, so load up the values. */
1249 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1251 rtx tmp;
1252 bool addr_dead_p;
1254 /* ??? We are currently constrained to *not* use peep2, because
1255 we can legitimately change the global lifetime of the GP
1256 (in the form of killing where previously live). This is
1257 because a call through a descriptor doesn't use the previous
1258 value of the GP, while a direct call does, and we do not
1259 commit to either form until the split here.
1261 That said, this means that we lack precise life info for
1262 whether ADDR is dead after this call. This is not terribly
1263 important, since we can fix things up essentially for free
1264 with the POST_DEC below, but it's nice to not use it when we
1265 can immediately tell it's not necessary. */
1266 addr_dead_p = ((noreturn_p || sibcall_p
1267 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1268 REGNO (addr)))
1269 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1271 /* Load the code address into scratch_b. */
1272 tmp = gen_rtx_POST_INC (Pmode, addr);
1273 tmp = gen_rtx_MEM (Pmode, tmp);
1274 emit_move_insn (scratch_r, tmp);
1275 emit_move_insn (scratch_b, scratch_r);
1277 /* Load the GP address. If ADDR is not dead here, then we must
1278 revert the change made above via the POST_INCREMENT. */
1279 if (!addr_dead_p)
1280 tmp = gen_rtx_POST_DEC (Pmode, addr);
1281 else
1282 tmp = addr;
1283 tmp = gen_rtx_MEM (Pmode, tmp);
1284 emit_move_insn (pic_offset_table_rtx, tmp);
1286 is_desc = true;
1287 addr = scratch_b;
1290 if (sibcall_p)
1291 insn = gen_sibcall_nogp (addr);
1292 else if (retval)
1293 insn = gen_call_value_nogp (retval, addr, retaddr);
1294 else
1295 insn = gen_call_nogp (addr, retaddr);
1296 emit_call_insn (insn);
1298 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1299 ia64_reload_gp ();
1302 /* Begin the assembly file. */
1304 static void
1305 ia64_file_start (void)
1307 default_file_start ();
1308 emit_safe_across_calls ();
1311 void
1312 emit_safe_across_calls (void)
1314 unsigned int rs, re;
1315 int out_state;
1317 rs = 1;
1318 out_state = 0;
1319 while (1)
1321 while (rs < 64 && call_used_regs[PR_REG (rs)])
1322 rs++;
1323 if (rs >= 64)
1324 break;
1325 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1326 continue;
1327 if (out_state == 0)
1329 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1330 out_state = 1;
1332 else
1333 fputc (',', asm_out_file);
1334 if (re == rs + 1)
1335 fprintf (asm_out_file, "p%u", rs);
1336 else
1337 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1338 rs = re + 1;
1340 if (out_state)
1341 fputc ('\n', asm_out_file);
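/* Illustrative note (not in the original source): with the default IA-64
   call-used predicate set (p6-p15 being scratch), the loop above typically
   emits ".pred.safe_across_calls p1-p5,p16-p63" at the top of the file.  */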
1344 /* Helper function for ia64_compute_frame_size: find an appropriate general
1345 register to spill some special register to. SPECIAL_SPILL_MASK contains
1346 bits in GR0 to GR31 that have already been allocated by this routine.
1347 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1349 static int
1350 find_gr_spill (int try_locals)
1352 int regno;
1354 /* If this is a leaf function, first try an otherwise unused
1355 call-clobbered register. */
1356 if (current_function_is_leaf)
1358 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1359 if (! regs_ever_live[regno]
1360 && call_used_regs[regno]
1361 && ! fixed_regs[regno]
1362 && ! global_regs[regno]
1363 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1365 current_frame_info.gr_used_mask |= 1 << regno;
1366 return regno;
1370 if (try_locals)
1372 regno = current_frame_info.n_local_regs;
1373 /* If there is a frame pointer, then we can't use loc79, because
1374 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1375 reg_name switching code in ia64_expand_prologue. */
1376 if (regno < (80 - frame_pointer_needed))
1378 current_frame_info.n_local_regs = regno + 1;
1379 return LOC_REG (0) + regno;
1383 /* Failed to find a general register to spill to. Must use stack. */
1384 return 0;
1387 /* In order to make for nice schedules, we try to allocate every temporary
1388 to a different register. We must of course stay away from call-saved,
1389 fixed, and global registers. We must also stay away from registers
1390 allocated in current_frame_info.gr_used_mask, since those include regs
1391 used all through the prologue.
1393 Any register allocated here must be used immediately. The idea is to
1394 aid scheduling, not to solve data flow problems. */
1396 static int last_scratch_gr_reg;
1398 static int
1399 next_scratch_gr_reg (void)
1401 int i, regno;
1403 for (i = 0; i < 32; ++i)
1405 regno = (last_scratch_gr_reg + i + 1) & 31;
1406 if (call_used_regs[regno]
1407 && ! fixed_regs[regno]
1408 && ! global_regs[regno]
1409 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1411 last_scratch_gr_reg = regno;
1412 return regno;
1416 /* There must be _something_ available. */
1417 abort ();
1420 /* Helper function for ia64_compute_frame_size, called through
1421 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
1423 static void
1424 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
1426 unsigned int regno = REGNO (reg);
1427 if (regno < 32)
1429 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1430 for (i = 0; i < n; ++i)
1431 current_frame_info.gr_used_mask |= 1 << (regno + i);
1435 /* Compute the register save masks, spill offsets, and total frame size
1436 for the current function, recording the results in current_frame_info.
1437 SIZE is the number of bytes of space needed for local variables. */
1439 static void
1440 ia64_compute_frame_size (HOST_WIDE_INT size)
1442 HOST_WIDE_INT total_size;
1443 HOST_WIDE_INT spill_size = 0;
1444 HOST_WIDE_INT extra_spill_size = 0;
1445 HOST_WIDE_INT pretend_args_size;
1446 HARD_REG_SET mask;
1447 int n_spilled = 0;
1448 int spilled_gr_p = 0;
1449 int spilled_fr_p = 0;
1450 unsigned int regno;
1451 int i;
1453 if (current_frame_info.initialized)
1454 return;
1456 memset (&current_frame_info, 0, sizeof current_frame_info);
1457 CLEAR_HARD_REG_SET (mask);
1459 /* Don't allocate scratches to the return register. */
1460 diddle_return_value (mark_reg_gr_used_mask, NULL);
1462 /* Don't allocate scratches to the EH scratch registers. */
1463 if (cfun->machine->ia64_eh_epilogue_sp)
1464 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
1465 if (cfun->machine->ia64_eh_epilogue_bsp)
1466 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
1468 /* Find the size of the register stack frame. We have only 80 local
1469 registers, because we reserve 8 for the inputs and 8 for the
1470 outputs. */
1472 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
1473 since we'll be adjusting that down later. */
1474 regno = LOC_REG (78) + ! frame_pointer_needed;
1475 for (; regno >= LOC_REG (0); regno--)
1476 if (regs_ever_live[regno])
1477 break;
1478 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
1480 /* For functions marked with the syscall_linkage attribute, we must mark
1481 all eight input registers as in use, so that locals aren't visible to
1482 the caller. */
1484 if (cfun->machine->n_varargs > 0
1485 || lookup_attribute ("syscall_linkage",
1486 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
1487 current_frame_info.n_input_regs = 8;
1488 else
1490 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
1491 if (regs_ever_live[regno])
1492 break;
1493 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
1496 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
1497 if (regs_ever_live[regno])
1498 break;
1499 i = regno - OUT_REG (0) + 1;
1501 /* When -p profiling, we need one output register for the mcount argument.
1502 Likewise for -a profiling for the bb_init_func argument. For -ax
1503 profiling, we need two output registers for the two bb_init_trace_func
1504 arguments. */
1505 if (current_function_profile)
1506 i = MAX (i, 1);
1507 current_frame_info.n_output_regs = i;
1509 /* ??? No rotating register support yet. */
1510 current_frame_info.n_rotate_regs = 0;
1512 /* Discover which registers need spilling, and how much room that
1513 will take. Begin with floating point and general registers,
1514 which will always wind up on the stack. */
1516 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
1517 if (regs_ever_live[regno] && ! call_used_regs[regno])
1519 SET_HARD_REG_BIT (mask, regno);
1520 spill_size += 16;
1521 n_spilled += 1;
1522 spilled_fr_p = 1;
1525 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1526 if (regs_ever_live[regno] && ! call_used_regs[regno])
1528 SET_HARD_REG_BIT (mask, regno);
1529 spill_size += 8;
1530 n_spilled += 1;
1531 spilled_gr_p = 1;
1534 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
1535 if (regs_ever_live[regno] && ! call_used_regs[regno])
1537 SET_HARD_REG_BIT (mask, regno);
1538 spill_size += 8;
1539 n_spilled += 1;
1542 /* Now come all special registers that might get saved in other
1543 general registers. */
1545 if (frame_pointer_needed)
1547 current_frame_info.reg_fp = find_gr_spill (1);
1548 /* If we did not get a register, then we take LOC79. This is guaranteed
1549 to be free, even if regs_ever_live is already set, because this is
1550 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
1551 as we don't count loc79 above. */
1552 if (current_frame_info.reg_fp == 0)
1554 current_frame_info.reg_fp = LOC_REG (79);
1555 current_frame_info.n_local_regs++;
1559 if (! current_function_is_leaf)
1561 /* Emit a save of BR0 if we call other functions. Do this even
1562 if this function doesn't return, as EH depends on this to be
1563 able to unwind the stack. */
1564 SET_HARD_REG_BIT (mask, BR_REG (0));
1566 current_frame_info.reg_save_b0 = find_gr_spill (1);
1567 if (current_frame_info.reg_save_b0 == 0)
1569 spill_size += 8;
1570 n_spilled += 1;
1573 /* Similarly for ar.pfs. */
1574 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1575 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1576 if (current_frame_info.reg_save_ar_pfs == 0)
1578 extra_spill_size += 8;
1579 n_spilled += 1;
1582 /* Similarly for gp. Note that if we're calling setjmp, the stacked
1583 registers are clobbered, so we fall back to the stack. */
1584 current_frame_info.reg_save_gp
1585 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
1586 if (current_frame_info.reg_save_gp == 0)
1588 SET_HARD_REG_BIT (mask, GR_REG (1));
1589 spill_size += 8;
1590 n_spilled += 1;
1593 else
1595 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
1597 SET_HARD_REG_BIT (mask, BR_REG (0));
1598 spill_size += 8;
1599 n_spilled += 1;
1602 if (regs_ever_live[AR_PFS_REGNUM])
1604 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1605 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1606 if (current_frame_info.reg_save_ar_pfs == 0)
1608 extra_spill_size += 8;
1609 n_spilled += 1;
1614 /* Unwind descriptor hackery: things are most efficient if we allocate
1615 consecutive GR save registers for RP, PFS, FP in that order. However,
1616 it is absolutely critical that FP get the only hard register that's
1617 guaranteed to be free, so we allocated it first. If all three did
1618 happen to be allocated hard regs, and are consecutive, rearrange them
1619 into the preferred order now. */
1620 if (current_frame_info.reg_fp != 0
1621 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
1622 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
1624 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
1625 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
1626 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
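/* Illustrative example (not in the original source): if FP had been given
   r35 and B0/PFS then received r36/r37, the swap above leaves B0 in r35,
   PFS in r36 and FP in r37, i.e. the consecutive RP, PFS, FP order that the
   unwind descriptors prefer.  */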
1629 /* See if we need to store the predicate register block. */
1630 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
1631 if (regs_ever_live[regno] && ! call_used_regs[regno])
1632 break;
1633 if (regno <= PR_REG (63))
1635 SET_HARD_REG_BIT (mask, PR_REG (0));
1636 current_frame_info.reg_save_pr = find_gr_spill (1);
1637 if (current_frame_info.reg_save_pr == 0)
1639 extra_spill_size += 8;
1640 n_spilled += 1;
1643 /* ??? Mark them all as used so that register renaming and such
1644 are free to use them. */
1645 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
1646 regs_ever_live[regno] = 1;
1649 /* If we're forced to use st8.spill, we're forced to save and restore
1650 ar.unat as well. The check for existing liveness allows inline asm
1651 to touch ar.unat. */
1652 if (spilled_gr_p || cfun->machine->n_varargs
1653 || regs_ever_live[AR_UNAT_REGNUM])
1655 regs_ever_live[AR_UNAT_REGNUM] = 1;
1656 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
1657 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
1658 if (current_frame_info.reg_save_ar_unat == 0)
1660 extra_spill_size += 8;
1661 n_spilled += 1;
1665 if (regs_ever_live[AR_LC_REGNUM])
1667 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
1668 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
1669 if (current_frame_info.reg_save_ar_lc == 0)
1671 extra_spill_size += 8;
1672 n_spilled += 1;
1676 /* If we have an odd number of words of pretend arguments written to
1677 the stack, then the FR save area will be unaligned. We round the
1678 size of this area up to keep things 16 byte aligned. */
1679 if (spilled_fr_p)
1680 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
1681 else
1682 pretend_args_size = current_function_pretend_args_size;
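/* Illustrative example (not in the original source): with 24 bytes of
   pretend arguments (an odd number of 8-byte words) and FR registers
   spilled, IA64_STACK_ALIGN rounds the 24 up to 32 so the 16-byte FR spill
   slots remain 16-byte aligned.  */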
1684 total_size = (spill_size + extra_spill_size + size + pretend_args_size
1685 + current_function_outgoing_args_size);
1686 total_size = IA64_STACK_ALIGN (total_size);
1688 /* We always use the 16-byte scratch area provided by the caller, but
1689 if we are a leaf function, there's no one to which we need to provide
1690 a scratch area. */
1691 if (current_function_is_leaf)
1692 total_size = MAX (0, total_size - 16);
1694 current_frame_info.total_size = total_size;
1695 current_frame_info.spill_cfa_off = pretend_args_size - 16;
1696 current_frame_info.spill_size = spill_size;
1697 current_frame_info.extra_spill_size = extra_spill_size;
1698 COPY_HARD_REG_SET (current_frame_info.mask, mask);
1699 current_frame_info.n_spilled = n_spilled;
1700 current_frame_info.initialized = reload_completed;
1703 /* Compute the initial difference between the specified pair of registers. */
1705 HOST_WIDE_INT
1706 ia64_initial_elimination_offset (int from, int to)
1708 HOST_WIDE_INT offset;
1710 ia64_compute_frame_size (get_frame_size ());
1711 switch (from)
1713 case FRAME_POINTER_REGNUM:
1714 if (to == HARD_FRAME_POINTER_REGNUM)
1716 if (current_function_is_leaf)
1717 offset = -current_frame_info.total_size;
1718 else
1719 offset = -(current_frame_info.total_size
1720 - current_function_outgoing_args_size - 16);
1722 else if (to == STACK_POINTER_REGNUM)
1724 if (current_function_is_leaf)
1725 offset = 0;
1726 else
1727 offset = 16 + current_function_outgoing_args_size;
1729 else
1730 abort ();
1731 break;
1733 case ARG_POINTER_REGNUM:
1734 /* Arguments start above the 16 byte save area, unless stdarg,
1735 in which case we store through the 16 byte save area. */
1736 if (to == HARD_FRAME_POINTER_REGNUM)
1737 offset = 16 - current_function_pretend_args_size;
1738 else if (to == STACK_POINTER_REGNUM)
1739 offset = (current_frame_info.total_size
1740 + 16 - current_function_pretend_args_size);
1741 else
1742 abort ();
1743 break;
1745 default:
1746 abort ();
1749 return offset;
1752 /* If there are more than a trivial number of register spills, we use
1753 two interleaved iterators so that we can get two memory references
1754 per insn group.
1756 In order to simplify things in the prologue and epilogue expanders,
1757 we use helper functions to fix up the memory references after the
1758 fact with the appropriate offsets to a POST_MODIFY memory mode.
1759 The following data structure tracks the state of the two iterators
1760 while insns are being emitted. */
1762 struct spill_fill_data
1764 rtx init_after; /* point at which to emit initializations */
1765 rtx init_reg[2]; /* initial base register */
1766 rtx iter_reg[2]; /* the iterator registers */
1767 rtx *prev_addr[2]; /* address of last memory use */
1768 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
1769 HOST_WIDE_INT prev_off[2]; /* last offset */
1770 int n_iter; /* number of iterators in use */
1771 int next_iter; /* next iterator to use */
1772 unsigned int save_gr_used_mask;
1775 static struct spill_fill_data spill_fill_data;
1777 static void
1778 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
1780 int i;
1782 spill_fill_data.init_after = get_last_insn ();
1783 spill_fill_data.init_reg[0] = init_reg;
1784 spill_fill_data.init_reg[1] = init_reg;
1785 spill_fill_data.prev_addr[0] = NULL;
1786 spill_fill_data.prev_addr[1] = NULL;
1787 spill_fill_data.prev_insn[0] = NULL;
1788 spill_fill_data.prev_insn[1] = NULL;
1789 spill_fill_data.prev_off[0] = cfa_off;
1790 spill_fill_data.prev_off[1] = cfa_off;
1791 spill_fill_data.next_iter = 0;
1792 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
1794 spill_fill_data.n_iter = 1 + (n_spills > 2);
1795 for (i = 0; i < spill_fill_data.n_iter; ++i)
1797 int regno = next_scratch_gr_reg ();
1798 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
1799 current_frame_info.gr_used_mask |= 1 << regno;
1803 static void
1804 finish_spill_pointers (void)
1806 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
1809 static rtx
1810 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
1812 int iter = spill_fill_data.next_iter;
1813 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
1814 rtx disp_rtx = GEN_INT (disp);
1815 rtx mem;
1817 if (spill_fill_data.prev_addr[iter])
1819 if (CONST_OK_FOR_N (disp))
1821 *spill_fill_data.prev_addr[iter]
1822 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
1823 gen_rtx_PLUS (DImode,
1824 spill_fill_data.iter_reg[iter],
1825 disp_rtx));
1826 REG_NOTES (spill_fill_data.prev_insn[iter])
1827 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
1828 REG_NOTES (spill_fill_data.prev_insn[iter]));
1830 else
1832 /* ??? Could use register post_modify for loads. */
1833 if (! CONST_OK_FOR_I (disp))
1835 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
1836 emit_move_insn (tmp, disp_rtx);
1837 disp_rtx = tmp;
1839 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
1840 spill_fill_data.iter_reg[iter], disp_rtx));
1843 /* Micro-optimization: if we've created a frame pointer, it's at
1844 CFA 0, which may allow the real iterator to be initialized lower,
1845 slightly increasing parallelism. Also, if there are few saves
1846 it may eliminate the iterator entirely. */
1847 else if (disp == 0
1848 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
1849 && frame_pointer_needed)
1851 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
1852 set_mem_alias_set (mem, get_varargs_alias_set ());
1853 return mem;
1855 else
1857 rtx seq, insn;
1859 if (disp == 0)
1860 seq = gen_movdi (spill_fill_data.iter_reg[iter],
1861 spill_fill_data.init_reg[iter]);
1862 else
1864 start_sequence ();
1866 if (! CONST_OK_FOR_I (disp))
1868 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
1869 emit_move_insn (tmp, disp_rtx);
1870 disp_rtx = tmp;
1873 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
1874 spill_fill_data.init_reg[iter],
1875 disp_rtx));
1877 seq = get_insns ();
1878 end_sequence ();
1881 /* Careful for being the first insn in a sequence. */
1882 if (spill_fill_data.init_after)
1883 insn = emit_insn_after (seq, spill_fill_data.init_after);
1884 else
1886 rtx first = get_insns ();
1887 if (first)
1888 insn = emit_insn_before (seq, first);
1889 else
1890 insn = emit_insn (seq);
1892 spill_fill_data.init_after = insn;
1894 /* If DISP is 0, we may or may not have a further adjustment
1895 afterward. If we do, then the load/store insn may be modified
1896 to be a post-modify. If we don't, then this copy may be
1897 eliminated by copyprop_hardreg_forward, which makes this
1898 insn garbage, which runs afoul of the sanity check in
1899 propagate_one_insn. So mark this insn as legal to delete. */
1900 if (disp == 0)
1901 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
1902 REG_NOTES (insn));
1905 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
1907 /* ??? Not all of the spills are for varargs, but some of them are.
1908 The rest of the spills belong in an alias set of their own. But
1909 it doesn't actually hurt to include them here. */
1910 set_mem_alias_set (mem, get_varargs_alias_set ());
1912 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
1913 spill_fill_data.prev_off[iter] = cfa_off;
1915 if (++iter >= spill_fill_data.n_iter)
1916 iter = 0;
1917 spill_fill_data.next_iter = iter;
1919 return mem;
1922 static void
1923 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
1924 rtx frame_reg)
1926 int iter = spill_fill_data.next_iter;
1927 rtx mem, insn;
1929 mem = spill_restore_mem (reg, cfa_off);
1930 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
1931 spill_fill_data.prev_insn[iter] = insn;
1933 if (frame_reg)
1935 rtx base;
1936 HOST_WIDE_INT off;
1938 RTX_FRAME_RELATED_P (insn) = 1;
1940 /* Don't even pretend that the unwind code can intuit its way
1941 through a pair of interleaved post_modify iterators. Just
1942 provide the correct answer. */
1944 if (frame_pointer_needed)
1946 base = hard_frame_pointer_rtx;
1947 off = - cfa_off;
1949 else
1951 base = stack_pointer_rtx;
1952 off = current_frame_info.total_size - cfa_off;
1955 REG_NOTES (insn)
1956 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
1957 gen_rtx_SET (VOIDmode,
1958 gen_rtx_MEM (GET_MODE (reg),
1959 plus_constant (base, off)),
1960 frame_reg),
1961 REG_NOTES (insn));
1965 static void
1966 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
1968 int iter = spill_fill_data.next_iter;
1969 rtx insn;
1971 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
1972 GEN_INT (cfa_off)));
1973 spill_fill_data.prev_insn[iter] = insn;
1976 /* Wrapper functions that discard the CONST_INT spill offset. These
1977 exist so that we can give gr_spill/gr_fill the offset they need and
1978 use a consistent function interface. */
1980 static rtx
1981 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1983 return gen_movdi (dest, src);
1986 static rtx
1987 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1989 return gen_fr_spill (dest, src);
1992 static rtx
1993 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1995 return gen_fr_restore (dest, src);
1998 /* Called after register allocation to add any instructions needed for the
1999 prologue. Using a prologue insn is favored compared to putting all of the
2000 instructions in output_function_prologue(), since it allows the scheduler
2001 to intermix instructions with the saves of the caller saved registers. In
2002 some cases, it might be necessary to emit a barrier instruction as the last
2003 insn to prevent such scheduling.
2005 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2006 so that the debug info generation code can handle them properly.
2008 The register save area is laid out like so:
2009 cfa+16
2010 [ varargs spill area ]
2011 [ fr register spill area ]
2012 [ br register spill area ]
2013 [ ar register spill area ]
2014 [ pr register spill area ]
2015 [ gr register spill area ] */
2017 /* ??? Get inefficient code when the frame size is larger than can fit in an
2018 adds instruction. */
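/* Illustration of the note above (sizes are hypothetical, and assume the
   'I' constraint is the short add immediate): a small frame, say a few
   hundred bytes, lets the prologue adjust sp with a single add of the
   displacement; a much larger frame, say 64 KB, fails the CONST_OK_FOR_I
   test below and costs an extra move of the displacement into a scratch
   register first.  */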
2020 void
2021 ia64_expand_prologue (void)
2023 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2024 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2025 rtx reg, alt_reg;
2027 ia64_compute_frame_size (get_frame_size ());
2028 last_scratch_gr_reg = 15;
2030 /* If there is no epilogue, then we don't need some prologue insns.
2031 We need to avoid emitting the dead prologue insns, because flow
2032 will complain about them. */
2033 if (optimize)
2035 edge e;
2036 edge_iterator ei;
2037 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2039 if ((e->flags & EDGE_FAKE) == 0
2040 && (e->flags & EDGE_FALLTHRU) != 0)
2041 break;
2043 epilogue_p = (e != NULL);
2045 else
2046 epilogue_p = 1;
2048 /* Set the local, input, and output register names. We need to do this
2049 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2050 half. If we use in/loc/out register names, then we get assembler errors
2051 in crtn.S because there is no alloc insn or regstk directive in there. */
2052 if (! TARGET_REG_NAMES)
2054 int inputs = current_frame_info.n_input_regs;
2055 int locals = current_frame_info.n_local_regs;
2056 int outputs = current_frame_info.n_output_regs;
2058 for (i = 0; i < inputs; i++)
2059 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2060 for (i = 0; i < locals; i++)
2061 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2062 for (i = 0; i < outputs; i++)
2063 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2066 /* Set the frame pointer register name. The regnum is logically loc79,
2067 but of course we'll not have allocated that many locals. Rather than
2068 worrying about renumbering the existing rtxs, we adjust the name. */
2069 /* ??? This code means that we can never use one local register when
2070 there is a frame pointer. loc79 gets wasted in this case, as it is
2071 renamed to a register that will never be used. See also the try_locals
2072 code in find_gr_spill. */
2073 if (current_frame_info.reg_fp)
2075 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2076 reg_names[HARD_FRAME_POINTER_REGNUM]
2077 = reg_names[current_frame_info.reg_fp];
2078 reg_names[current_frame_info.reg_fp] = tmp;
2081 /* We don't need an alloc instruction if we've used no outputs or locals. */
2082 if (current_frame_info.n_local_regs == 0
2083 && current_frame_info.n_output_regs == 0
2084 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2085 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2087 /* If there is no alloc, but there are input registers used, then we
2088 need a .regstk directive. */
2089 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2090 ar_pfs_save_reg = NULL_RTX;
2092 else
2094 current_frame_info.need_regstk = 0;
2096 if (current_frame_info.reg_save_ar_pfs)
2097 regno = current_frame_info.reg_save_ar_pfs;
2098 else
2099 regno = next_scratch_gr_reg ();
2100 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2102 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2103 GEN_INT (current_frame_info.n_input_regs),
2104 GEN_INT (current_frame_info.n_local_regs),
2105 GEN_INT (current_frame_info.n_output_regs),
2106 GEN_INT (current_frame_info.n_rotate_regs)));
2107 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2110 /* Set up frame pointer, stack pointer, and spill iterators. */
2112 n_varargs = cfun->machine->n_varargs;
2113 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2114 stack_pointer_rtx, 0);
2116 if (frame_pointer_needed)
2118 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2119 RTX_FRAME_RELATED_P (insn) = 1;
2122 if (current_frame_info.total_size != 0)
2124 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2125 rtx offset;
2127 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2128 offset = frame_size_rtx;
2129 else
2131 regno = next_scratch_gr_reg ();
2132 offset = gen_rtx_REG (DImode, regno);
2133 emit_move_insn (offset, frame_size_rtx);
2136 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2137 stack_pointer_rtx, offset));
2139 if (! frame_pointer_needed)
2141 RTX_FRAME_RELATED_P (insn) = 1;
2142 if (GET_CODE (offset) != CONST_INT)
2144 REG_NOTES (insn)
2145 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2146 gen_rtx_SET (VOIDmode,
2147 stack_pointer_rtx,
2148 gen_rtx_PLUS (DImode,
2149 stack_pointer_rtx,
2150 frame_size_rtx)),
2151 REG_NOTES (insn));
2155 /* ??? At this point we must generate a magic insn that appears to
2156 modify the stack pointer, the frame pointer, and all spill
2157 iterators. This would allow the most scheduling freedom. For
2158 now, just hard stop. */
2159 emit_insn (gen_blockage ());
2162 /* Must copy out ar.unat before doing any integer spills. */
2163 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2165 if (current_frame_info.reg_save_ar_unat)
2166 ar_unat_save_reg
2167 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2168 else
2170 alt_regno = next_scratch_gr_reg ();
2171 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2172 current_frame_info.gr_used_mask |= 1 << alt_regno;
2175 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2176 insn = emit_move_insn (ar_unat_save_reg, reg);
2177 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2179 /* Even if we're not going to generate an epilogue, we still
2180 need to save the register so that EH works. */
2181 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2182 emit_insn (gen_prologue_use (ar_unat_save_reg));
2184 else
2185 ar_unat_save_reg = NULL_RTX;
2187 /* Spill all varargs registers. Do this before spilling any GR registers,
2188 since we want the UNAT bits for the GR registers to override the UNAT
2189 bits from varargs, which we don't care about. */
2191 cfa_off = -16;
2192 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2194 reg = gen_rtx_REG (DImode, regno);
2195 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2198 /* Locate the bottom of the register save area. */
2199 cfa_off = (current_frame_info.spill_cfa_off
2200 + current_frame_info.spill_size
2201 + current_frame_info.extra_spill_size);
2203 /* Save the predicate register block either in a register or in memory. */
2204 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2206 reg = gen_rtx_REG (DImode, PR_REG (0));
2207 if (current_frame_info.reg_save_pr != 0)
2209 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2210 insn = emit_move_insn (alt_reg, reg);
2212 /* ??? Denote pr spill/fill by a DImode move that modifies all
2213 64 hard registers. */
2214 RTX_FRAME_RELATED_P (insn) = 1;
2215 REG_NOTES (insn)
2216 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2217 gen_rtx_SET (VOIDmode, alt_reg, reg),
2218 REG_NOTES (insn));
2220 /* Even if we're not going to generate an epilogue, we still
2221 need to save the register so that EH works. */
2222 if (! epilogue_p)
2223 emit_insn (gen_prologue_use (alt_reg));
2225 else
2227 alt_regno = next_scratch_gr_reg ();
2228 alt_reg = gen_rtx_REG (DImode, alt_regno);
2229 insn = emit_move_insn (alt_reg, reg);
2230 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2231 cfa_off -= 8;
2235 /* Handle AR regs in numerical order. All of them get special handling. */
2236 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2237 && current_frame_info.reg_save_ar_unat == 0)
2239 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2240 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2241 cfa_off -= 8;
2244 /* The alloc insn already copied ar.pfs into a general register. The
2245 only thing we have to do now is copy that register to a stack slot
2246 if we'd not allocated a local register for the job. */
2247 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2248 && current_frame_info.reg_save_ar_pfs == 0)
2250 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2251 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2252 cfa_off -= 8;
2255 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2257 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2258 if (current_frame_info.reg_save_ar_lc != 0)
2260 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2261 insn = emit_move_insn (alt_reg, reg);
2262 RTX_FRAME_RELATED_P (insn) = 1;
2264 /* Even if we're not going to generate an epilogue, we still
2265 need to save the register so that EH works. */
2266 if (! epilogue_p)
2267 emit_insn (gen_prologue_use (alt_reg));
2269 else
2271 alt_regno = next_scratch_gr_reg ();
2272 alt_reg = gen_rtx_REG (DImode, alt_regno);
2273 emit_move_insn (alt_reg, reg);
2274 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2275 cfa_off -= 8;
2279 if (current_frame_info.reg_save_gp)
2281 insn = emit_move_insn (gen_rtx_REG (DImode,
2282 current_frame_info.reg_save_gp),
2283 pic_offset_table_rtx);
2284 /* We don't know for sure yet if this is actually needed, since
2285 we've not split the PIC call patterns. If all of the calls
2286 are indirect, and not followed by any uses of the gp, then
2287 this save is dead. Allow it to go away. */
2288 REG_NOTES (insn)
2289 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2292 /* We should now be at the base of the gr/br/fr spill area. */
2293 if (cfa_off != (current_frame_info.spill_cfa_off
2294 + current_frame_info.spill_size))
2295 abort ();
2297 /* Spill all general registers. */
2298 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2299 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2301 reg = gen_rtx_REG (DImode, regno);
2302 do_spill (gen_gr_spill, reg, cfa_off, reg);
2303 cfa_off -= 8;
2306 /* Handle BR0 specially -- it may be getting stored permanently in
2307 some GR register. */
2308 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2310 reg = gen_rtx_REG (DImode, BR_REG (0));
2311 if (current_frame_info.reg_save_b0 != 0)
2313 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2314 insn = emit_move_insn (alt_reg, reg);
2315 RTX_FRAME_RELATED_P (insn) = 1;
2317 /* Even if we're not going to generate an epilogue, we still
2318 need to save the register so that EH works. */
2319 if (! epilogue_p)
2320 emit_insn (gen_prologue_use (alt_reg));
2322 else
2324 alt_regno = next_scratch_gr_reg ();
2325 alt_reg = gen_rtx_REG (DImode, alt_regno);
2326 emit_move_insn (alt_reg, reg);
2327 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2328 cfa_off -= 8;
2332 /* Spill the rest of the BR registers. */
2333 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2334 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2336 alt_regno = next_scratch_gr_reg ();
2337 alt_reg = gen_rtx_REG (DImode, alt_regno);
2338 reg = gen_rtx_REG (DImode, regno);
2339 emit_move_insn (alt_reg, reg);
2340 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2341 cfa_off -= 8;
2344 /* Align the frame and spill all FR registers. */
2345 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2346 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2348 if (cfa_off & 15)
2349 abort ();
2350 reg = gen_rtx_REG (XFmode, regno);
2351 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2352 cfa_off -= 16;
2355 if (cfa_off != current_frame_info.spill_cfa_off)
2356 abort ();
2358 finish_spill_pointers ();
2361 /* Called after register allocation to add any instructions needed for the
2362 epilogue. Using an epilogue insn is favored compared to putting all of the
2363 instructions in output_function_epilogue(), since it allows the scheduler
2364 to intermix instructions with the restores of the caller saved registers. In
2365 some cases, it might be necessary to emit a barrier instruction as the last
2366 insn to prevent such scheduling. */
2368 void
2369 ia64_expand_epilogue (int sibcall_p)
2371 rtx insn, reg, alt_reg, ar_unat_save_reg;
2372 int regno, alt_regno, cfa_off;
2374 ia64_compute_frame_size (get_frame_size ());
2376 /* If there is a frame pointer, then we use it instead of the stack
2377 pointer, so that the stack pointer does not need to be valid when
2378 the epilogue starts. See EXIT_IGNORE_STACK. */
2379 if (frame_pointer_needed)
2380 setup_spill_pointers (current_frame_info.n_spilled,
2381 hard_frame_pointer_rtx, 0);
2382 else
2383 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2384 current_frame_info.total_size);
2386 if (current_frame_info.total_size != 0)
2388 /* ??? At this point we must generate a magic insn that appears to
2389 modify the spill iterators and the frame pointer. This would
2390 allow the most scheduling freedom. For now, just hard stop. */
2391 emit_insn (gen_blockage ());
2394 /* Locate the bottom of the register save area. */
2395 cfa_off = (current_frame_info.spill_cfa_off
2396 + current_frame_info.spill_size
2397 + current_frame_info.extra_spill_size);
2399 /* Restore the predicate registers. */
2400 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2402 if (current_frame_info.reg_save_pr != 0)
2403 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2404 else
2406 alt_regno = next_scratch_gr_reg ();
2407 alt_reg = gen_rtx_REG (DImode, alt_regno);
2408 do_restore (gen_movdi_x, alt_reg, cfa_off);
2409 cfa_off -= 8;
2411 reg = gen_rtx_REG (DImode, PR_REG (0));
2412 emit_move_insn (reg, alt_reg);
2415 /* Restore the application registers. */
2417 /* Load the saved unat from the stack, but do not restore it until
2418 after the GRs have been restored. */
2419 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2421 if (current_frame_info.reg_save_ar_unat != 0)
2422 ar_unat_save_reg
2423 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2424 else
2426 alt_regno = next_scratch_gr_reg ();
2427 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2428 current_frame_info.gr_used_mask |= 1 << alt_regno;
2429 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2430 cfa_off -= 8;
2433 else
2434 ar_unat_save_reg = NULL_RTX;
2436 if (current_frame_info.reg_save_ar_pfs != 0)
2438 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2439 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2440 emit_move_insn (reg, alt_reg);
2442 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2444 alt_regno = next_scratch_gr_reg ();
2445 alt_reg = gen_rtx_REG (DImode, alt_regno);
2446 do_restore (gen_movdi_x, alt_reg, cfa_off);
2447 cfa_off -= 8;
2448 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2449 emit_move_insn (reg, alt_reg);
2452 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2454 if (current_frame_info.reg_save_ar_lc != 0)
2455 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2456 else
2458 alt_regno = next_scratch_gr_reg ();
2459 alt_reg = gen_rtx_REG (DImode, alt_regno);
2460 do_restore (gen_movdi_x, alt_reg, cfa_off);
2461 cfa_off -= 8;
2463 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2464 emit_move_insn (reg, alt_reg);
2467 /* We should now be at the base of the gr/br/fr spill area. */
2468 if (cfa_off != (current_frame_info.spill_cfa_off
2469 + current_frame_info.spill_size))
2470 abort ();
2472 /* The GP may be stored on the stack in the prologue, but it's
2473 never restored in the epilogue. Skip the stack slot. */
2474 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2475 cfa_off -= 8;
2477 /* Restore all general registers. */
2478 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2479 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2481 reg = gen_rtx_REG (DImode, regno);
2482 do_restore (gen_gr_restore, reg, cfa_off);
2483 cfa_off -= 8;
2486 /* Restore the branch registers. Handle B0 specially, as it may
2487 have gotten stored in some GR register. */
2488 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2490 if (current_frame_info.reg_save_b0 != 0)
2491 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2492 else
2494 alt_regno = next_scratch_gr_reg ();
2495 alt_reg = gen_rtx_REG (DImode, alt_regno);
2496 do_restore (gen_movdi_x, alt_reg, cfa_off);
2497 cfa_off -= 8;
2499 reg = gen_rtx_REG (DImode, BR_REG (0));
2500 emit_move_insn (reg, alt_reg);
2503 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2504 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2506 alt_regno = next_scratch_gr_reg ();
2507 alt_reg = gen_rtx_REG (DImode, alt_regno);
2508 do_restore (gen_movdi_x, alt_reg, cfa_off);
2509 cfa_off -= 8;
2510 reg = gen_rtx_REG (DImode, regno);
2511 emit_move_insn (reg, alt_reg);
2514 /* Restore floating point registers. */
2515 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2516 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2518 if (cfa_off & 15)
2519 abort ();
2520 reg = gen_rtx_REG (XFmode, regno);
2521 do_restore (gen_fr_restore_x, reg, cfa_off);
2522 cfa_off -= 16;
2525 /* Restore ar.unat for real. */
2526 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2528 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2529 emit_move_insn (reg, ar_unat_save_reg);
2532 if (cfa_off != current_frame_info.spill_cfa_off)
2533 abort ();
2535 finish_spill_pointers ();
2537 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2539 /* ??? At this point we must generate a magic insn that appears to
2540 modify the spill iterators, the stack pointer, and the frame
2541 pointer. This would allow the most scheduling freedom. For now,
2542 just hard stop. */
2543 emit_insn (gen_blockage ());
2546 if (cfun->machine->ia64_eh_epilogue_sp)
2547 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2548 else if (frame_pointer_needed)
2550 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2551 RTX_FRAME_RELATED_P (insn) = 1;
2553 else if (current_frame_info.total_size)
2555 rtx offset, frame_size_rtx;
2557 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2558 if (CONST_OK_FOR_I (current_frame_info.total_size))
2559 offset = frame_size_rtx;
2560 else
2562 regno = next_scratch_gr_reg ();
2563 offset = gen_rtx_REG (DImode, regno);
2564 emit_move_insn (offset, frame_size_rtx);
2567 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2568 offset));
2570 RTX_FRAME_RELATED_P (insn) = 1;
2571 if (GET_CODE (offset) != CONST_INT)
2573 REG_NOTES (insn)
2574 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2575 gen_rtx_SET (VOIDmode,
2576 stack_pointer_rtx,
2577 gen_rtx_PLUS (DImode,
2578 stack_pointer_rtx,
2579 frame_size_rtx)),
2580 REG_NOTES (insn));
2584 if (cfun->machine->ia64_eh_epilogue_bsp)
2585 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2587 if (! sibcall_p)
2588 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2589 else
2591 int fp = GR_REG (2);
2592 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
2593 first available call clobbered register. If there was a frame_pointer
2594 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2595 so we have to make sure we're using the string "r2" when emitting
2596 the register name for the assembler. */
2597 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2598 fp = HARD_FRAME_POINTER_REGNUM;
2600 /* We must emit an alloc to force the input registers to become output
2601 registers. Otherwise, if the callee tries to pass its parameters
2602 through to another call without an intervening alloc, then these
2603 values get lost. */
2604 /* ??? We don't need to preserve all input registers. We only need to
2605 preserve those input registers used as arguments to the sibling call.
2606 It is unclear how to compute that number here. */
2607 if (current_frame_info.n_input_regs != 0)
2608 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2609 const0_rtx, const0_rtx,
2610 GEN_INT (current_frame_info.n_input_regs),
2611 const0_rtx));
2615 /* Return 1 if br.ret can do all the work required to return from a
2616 function. */
2619 ia64_direct_return (void)
2621 if (reload_completed && ! frame_pointer_needed)
2623 ia64_compute_frame_size (get_frame_size ());
2625 return (current_frame_info.total_size == 0
2626 && current_frame_info.n_spilled == 0
2627 && current_frame_info.reg_save_b0 == 0
2628 && current_frame_info.reg_save_pr == 0
2629 && current_frame_info.reg_save_ar_pfs == 0
2630 && current_frame_info.reg_save_ar_unat == 0
2631 && current_frame_info.reg_save_ar_lc == 0);
2633 return 0;
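/* Illustration: a function with no frame, no spilled registers and none
   of b0/pr/ar.pfs/ar.unat/ar.lc saved -- e.g. a small leaf routine that
   only touches scratch registers -- satisfies every test above, so its
   epilogue can be a bare br.ret.  Any saved register or nonzero frame
   size makes this return 0.  */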
2636 /* Return the magic cookie that we use to hold the return address
2637 during early compilation. */
2640 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
2642 if (count != 0)
2643 return NULL;
2644 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
2647 /* Split this value after reload, now that we know where the return
2648 address is saved. */
2650 void
2651 ia64_split_return_addr_rtx (rtx dest)
2653 rtx src;
2655 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2657 if (current_frame_info.reg_save_b0 != 0)
2658 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2659 else
2661 HOST_WIDE_INT off;
2662 unsigned int regno;
2664 /* Compute offset from CFA for BR0. */
2665 /* ??? Must be kept in sync with ia64_expand_prologue. */
2666 off = (current_frame_info.spill_cfa_off
2667 + current_frame_info.spill_size);
2668 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2669 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2670 off -= 8;
2672 /* Convert CFA offset to a register based offset. */
2673 if (frame_pointer_needed)
2674 src = hard_frame_pointer_rtx;
2675 else
2677 src = stack_pointer_rtx;
2678 off += current_frame_info.total_size;
2681 /* Load address into scratch register. */
2682 if (CONST_OK_FOR_I (off))
2683 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
2684 else
2686 emit_move_insn (dest, GEN_INT (off));
2687 emit_insn (gen_adddi3 (dest, src, dest));
2690 src = gen_rtx_MEM (Pmode, dest);
2693 else
2694 src = gen_rtx_REG (DImode, BR_REG (0));
2696 emit_move_insn (dest, src);
2700 ia64_hard_regno_rename_ok (int from, int to)
2702 /* Don't clobber any of the registers we reserved for the prologue. */
2703 if (to == current_frame_info.reg_fp
2704 || to == current_frame_info.reg_save_b0
2705 || to == current_frame_info.reg_save_pr
2706 || to == current_frame_info.reg_save_ar_pfs
2707 || to == current_frame_info.reg_save_ar_unat
2708 || to == current_frame_info.reg_save_ar_lc)
2709 return 0;
2711 if (from == current_frame_info.reg_fp
2712 || from == current_frame_info.reg_save_b0
2713 || from == current_frame_info.reg_save_pr
2714 || from == current_frame_info.reg_save_ar_pfs
2715 || from == current_frame_info.reg_save_ar_unat
2716 || from == current_frame_info.reg_save_ar_lc)
2717 return 0;
2719 /* Don't use output registers outside the register frame. */
2720 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
2721 return 0;
2723 /* Retain even/oddness on predicate register pairs. */
2724 if (PR_REGNO_P (from) && PR_REGNO_P (to))
2725 return (from & 1) == (to & 1);
2727 return 1;
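/* Example of the last rule above: renaming p6 to p8 is allowed (both
   even), while renaming p6 to p7 is rejected, since predicates are used
   in even/odd pairs and a predicate and its complement are assumed to
   differ only in the low bit of the register number.  */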
2730 /* Target hook for assembling integer objects. Handle word-sized
2731 aligned objects and detect the cases when @fptr is needed. */
2733 static bool
2734 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
2736 if (size == POINTER_SIZE / BITS_PER_UNIT
2737 && aligned_p
2738 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
2739 && GET_CODE (x) == SYMBOL_REF
2740 && SYMBOL_REF_FUNCTION_P (x))
2742 if (POINTER_SIZE == 32)
2743 fputs ("\tdata4\t@fptr(", asm_out_file);
2744 else
2745 fputs ("\tdata8\t@fptr(", asm_out_file);
2746 output_addr_const (asm_out_file, x);
2747 fputs (")\n", asm_out_file);
2748 return true;
2750 return default_assemble_integer (x, size, aligned_p);
2753 /* Emit the function prologue. */
2755 static void
2756 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2758 int mask, grsave, grsave_prev;
2760 if (current_frame_info.need_regstk)
2761 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
2762 current_frame_info.n_input_regs,
2763 current_frame_info.n_local_regs,
2764 current_frame_info.n_output_regs,
2765 current_frame_info.n_rotate_regs);
2767 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2768 return;
2770 /* Emit the .prologue directive. */
2772 mask = 0;
2773 grsave = grsave_prev = 0;
2774 if (current_frame_info.reg_save_b0 != 0)
2776 mask |= 8;
2777 grsave = grsave_prev = current_frame_info.reg_save_b0;
2779 if (current_frame_info.reg_save_ar_pfs != 0
2780 && (grsave_prev == 0
2781 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
2783 mask |= 4;
2784 if (grsave_prev == 0)
2785 grsave = current_frame_info.reg_save_ar_pfs;
2786 grsave_prev = current_frame_info.reg_save_ar_pfs;
2788 if (current_frame_info.reg_fp != 0
2789 && (grsave_prev == 0
2790 || current_frame_info.reg_fp == grsave_prev + 1))
2792 mask |= 2;
2793 if (grsave_prev == 0)
2794 grsave = HARD_FRAME_POINTER_REGNUM;
2795 grsave_prev = current_frame_info.reg_fp;
2797 if (current_frame_info.reg_save_pr != 0
2798 && (grsave_prev == 0
2799 || current_frame_info.reg_save_pr == grsave_prev + 1))
2801 mask |= 1;
2802 if (grsave_prev == 0)
2803 grsave = current_frame_info.reg_save_pr;
2806 if (mask && TARGET_GNU_AS)
2807 fprintf (file, "\t.prologue %d, %d\n", mask,
2808 ia64_dbx_register_number (grsave));
2809 else
2810 fputs ("\t.prologue\n", file);
2812 /* Emit a .spill directive, if necessary, to relocate the base of
2813 the register spill area. */
2814 if (current_frame_info.spill_cfa_off != -16)
2815 fprintf (file, "\t.spill %ld\n",
2816 (long) (current_frame_info.spill_cfa_off
2817 + current_frame_info.spill_size));
2820 /* Emit the .body directive at the scheduled end of the prologue. */
2822 static void
2823 ia64_output_function_end_prologue (FILE *file)
2825 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2826 return;
2828 fputs ("\t.body\n", file);
2831 /* Emit the function epilogue. */
2833 static void
2834 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
2835 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2837 int i;
2839 if (current_frame_info.reg_fp)
2841 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2842 reg_names[HARD_FRAME_POINTER_REGNUM]
2843 = reg_names[current_frame_info.reg_fp];
2844 reg_names[current_frame_info.reg_fp] = tmp;
2846 if (! TARGET_REG_NAMES)
2848 for (i = 0; i < current_frame_info.n_input_regs; i++)
2849 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
2850 for (i = 0; i < current_frame_info.n_local_regs; i++)
2851 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
2852 for (i = 0; i < current_frame_info.n_output_regs; i++)
2853 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
2856 current_frame_info.initialized = 0;
2860 ia64_dbx_register_number (int regno)
2862 /* In ia64_expand_prologue we quite literally renamed the frame pointer
2863 from its home at loc79 to something inside the register frame. We
2864 must perform the same renumbering here for the debug info. */
2865 if (current_frame_info.reg_fp)
2867 if (regno == HARD_FRAME_POINTER_REGNUM)
2868 regno = current_frame_info.reg_fp;
2869 else if (regno == current_frame_info.reg_fp)
2870 regno = HARD_FRAME_POINTER_REGNUM;
2873 if (IN_REGNO_P (regno))
2874 return 32 + regno - IN_REG (0);
2875 else if (LOC_REGNO_P (regno))
2876 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
2877 else if (OUT_REGNO_P (regno))
2878 return (32 + current_frame_info.n_input_regs
2879 + current_frame_info.n_local_regs + regno - OUT_REG (0));
2880 else
2881 return regno;
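/* Worked example (hypothetical frame): with 3 input and 2 local
   registers, the debug numbers come out as
     in0..in2   -> 32, 33, 34
     loc0, loc1 -> 35, 36
     out0...    -> 37 upward
   and if loc1 is serving as the renamed frame pointer, a query for
   HARD_FRAME_POINTER_REGNUM reports loc1's number (36) instead.  */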
2884 void
2885 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
2887 rtx addr_reg, eight = GEN_INT (8);
2889 /* The Intel assembler requires that the global __ia64_trampoline symbol
2890 be declared explicitly */
2891 if (!TARGET_GNU_AS)
2893 static bool declared_ia64_trampoline = false;
2895 if (!declared_ia64_trampoline)
2897 declared_ia64_trampoline = true;
2898 (*targetm.asm_out.globalize_label) (asm_out_file,
2899 "__ia64_trampoline");
2903 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
2904 addr = convert_memory_address (Pmode, addr);
2905 fnaddr = convert_memory_address (Pmode, fnaddr);
2906 static_chain = convert_memory_address (Pmode, static_chain);
2908 /* Load up our iterator. */
2909 addr_reg = gen_reg_rtx (Pmode);
2910 emit_move_insn (addr_reg, addr);
2912 /* The first two words are the fake descriptor:
2913 __ia64_trampoline, ADDR+16. */
2914 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2915 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
2916 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2918 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2919 copy_to_reg (plus_constant (addr, 16)));
2920 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2922 /* The third word is the target descriptor. */
2923 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
2924 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2926 /* The fourth word is the static chain. */
2927 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
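/* The resulting 32-byte trampoline block, for illustration:
     addr+0   __ia64_trampoline   } fake function descriptor
     addr+8   addr+16             }
     addr+16  fnaddr (the target descriptor)
     addr+24  static chain
   Calling through the descriptor at ADDR thus enters __ia64_trampoline,
   which presumably locates the real target and static chain via the
   ADDR+16 word stored alongside it.  */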
2930 /* Do any needed setup for a variadic function. CUM has not been updated
2931 for the last named argument which has type TYPE and mode MODE.
2933 We generate the actual spill instructions during prologue generation. */
2935 static void
2936 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2937 tree type, int * pretend_size,
2938 int second_time ATTRIBUTE_UNUSED)
2940 CUMULATIVE_ARGS next_cum = *cum;
2942 /* Skip the current argument. */
2943 ia64_function_arg_advance (&next_cum, mode, type, 1);
2945 if (next_cum.words < MAX_ARGUMENT_SLOTS)
2947 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
2948 *pretend_size = n * UNITS_PER_WORD;
2949 cfun->machine->n_varargs = n;
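/* Example (hypothetical prototype): for f (int a, int b, ...), the two
   named arguments occupy 2 of the MAX_ARGUMENT_SLOTS argument slots, so
   with 8 slots n = 6, *pretend_size = 6 * UNITS_PER_WORD = 48 bytes,
   and the prologue will spill the 6 remaining argument GRs into that
   area.  */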
2953 /* Check whether TYPE is a homogeneous floating point aggregate. If
2954 it is, return the mode of the floating point type that appears
2955 in all leaves. If it is not, return VOIDmode.
2957 An aggregate is a homogeneous floating point aggregate if all
2958 fields/elements in it have the same floating point type (e.g.,
2959 SFmode). 128-bit quad-precision floats are excluded. */
2961 static enum machine_mode
2962 hfa_element_mode (tree type, int nested)
2964 enum machine_mode element_mode = VOIDmode;
2965 enum machine_mode mode;
2966 enum tree_code code = TREE_CODE (type);
2967 int know_element_mode = 0;
2968 tree t;
2970 switch (code)
2972 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
2973 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
2974 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
2975 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
2976 case FUNCTION_TYPE:
2977 return VOIDmode;
2979 /* Fortran complex types are supposed to be HFAs, so we need to handle
2980 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
2981 types though. */
2982 case COMPLEX_TYPE:
2983 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
2984 && TYPE_MODE (type) != TCmode)
2985 return GET_MODE_INNER (TYPE_MODE (type));
2986 else
2987 return VOIDmode;
2989 case REAL_TYPE:
2990 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
2991 mode if this is contained within an aggregate. */
2992 if (nested && TYPE_MODE (type) != TFmode)
2993 return TYPE_MODE (type);
2994 else
2995 return VOIDmode;
2997 case ARRAY_TYPE:
2998 return hfa_element_mode (TREE_TYPE (type), 1);
3000 case RECORD_TYPE:
3001 case UNION_TYPE:
3002 case QUAL_UNION_TYPE:
3003 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3005 if (TREE_CODE (t) != FIELD_DECL)
3006 continue;
3008 mode = hfa_element_mode (TREE_TYPE (t), 1);
3009 if (know_element_mode)
3011 if (mode != element_mode)
3012 return VOIDmode;
3014 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3015 return VOIDmode;
3016 else
3018 know_element_mode = 1;
3019 element_mode = mode;
3022 return element_mode;
3024 default:
3025 /* If we reach here, we probably have some front-end specific type
3026 that the backend doesn't know about. This can happen via the
3027 aggregate_value_p call in init_function_start. All we can do is
3028 ignore unknown tree types. */
3029 return VOIDmode;
3032 return VOIDmode;
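/* Examples of the classification above (ordinary C types, purely for
   illustration): 'struct { float x, y, z; }' yields SFmode and
   'struct { double d[4]; }' yields DFmode, while
   'struct { float f; double d; }' mixes element modes and yields
   VOIDmode.  '_Complex double' counts as two DFmode elements, whereas
   quad-precision (TFmode/TCmode) types are excluded and also yield
   VOIDmode.  */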
3035 /* Return the number of words required to hold a quantity of TYPE and MODE
3036 when passed as an argument. */
3037 static int
3038 ia64_function_arg_words (tree type, enum machine_mode mode)
3040 int words;
3042 if (mode == BLKmode)
3043 words = int_size_in_bytes (type);
3044 else
3045 words = GET_MODE_SIZE (mode);
3047 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3050 /* Return the number of registers that should be skipped so the current
3051 argument (described by TYPE and WORDS) will be properly aligned.
3053 Integer and float arguments larger than 8 bytes start at the next
3054 even boundary. Aggregates larger than 8 bytes start at the next
3055 even boundary if the aggregate has 16 byte alignment. Note that
3056 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3057 but are still to be aligned in registers.
3059 ??? The ABI does not specify how to handle aggregates with
3060 alignment from 9 to 15 bytes, or greater than 16. We handle them
3061 all as if they had 16 byte alignment. Such aggregates can occur
3062 only if gcc extensions are used. */
3063 static int
3064 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3066 if ((cum->words & 1) == 0)
3067 return 0;
3069 if (type
3070 && TREE_CODE (type) != INTEGER_TYPE
3071 && TREE_CODE (type) != REAL_TYPE)
3072 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3073 else
3074 return words > 1;
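/* Example of the alignment skip (illustrative): if cum->words is
   currently odd and the next argument is an aggregate aligned to more
   than 8 bytes, or an integer/FP scalar wider than 8 bytes, one slot is
   skipped so the argument starts on an even (16-byte) boundary; if
   cum->words is already even, nothing is skipped.  */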
3077 /* Return rtx for register where argument is passed, or zero if it is passed
3078 on the stack. */
3079 /* ??? 128-bit quad-precision floats are always passed in general
3080 registers. */
3083 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3084 int named, int incoming)
3086 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3087 int words = ia64_function_arg_words (type, mode);
3088 int offset = ia64_function_arg_offset (cum, type, words);
3089 enum machine_mode hfa_mode = VOIDmode;
3091 /* If all argument slots are used, then it must go on the stack. */
3092 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3093 return 0;
3095 /* Check for and handle homogeneous FP aggregates. */
3096 if (type)
3097 hfa_mode = hfa_element_mode (type, 0);
3099 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3100 and unprototyped hfas are passed specially. */
3101 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3103 rtx loc[16];
3104 int i = 0;
3105 int fp_regs = cum->fp_regs;
3106 int int_regs = cum->words + offset;
3107 int hfa_size = GET_MODE_SIZE (hfa_mode);
3108 int byte_size;
3109 int args_byte_size;
3111 /* If prototyped, pass it in FR regs then GR regs.
3112 If not prototyped, pass it in both FR and GR regs.
3114 If this is an SFmode aggregate, then it is possible to run out of
3115 FR regs while GR regs are still left. In that case, we pass the
3116 remaining part in the GR regs. */
3118 /* Fill the FP regs. We do this always. We stop if we reach the end
3119 of the argument, the last FP register, or the last argument slot. */
3121 byte_size = ((mode == BLKmode)
3122 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3123 args_byte_size = int_regs * UNITS_PER_WORD;
3124 offset = 0;
3125 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3126 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3128 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3129 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3130 + fp_regs)),
3131 GEN_INT (offset));
3132 offset += hfa_size;
3133 args_byte_size += hfa_size;
3134 fp_regs++;
3137 /* If no prototype, then the whole thing must go in GR regs. */
3138 if (! cum->prototype)
3139 offset = 0;
3140 /* If this is an SFmode aggregate, then we might have some left over
3141 that needs to go in GR regs. */
3142 else if (byte_size != offset)
3143 int_regs += offset / UNITS_PER_WORD;
3145 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3147 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3149 enum machine_mode gr_mode = DImode;
3150 unsigned int gr_size;
3152 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3153 then this goes in a GR reg left adjusted/little endian, right
3154 adjusted/big endian. */
3155 /* ??? Currently this is handled wrong, because 4-byte hunks are
3156 always right adjusted/little endian. */
3157 if (offset & 0x4)
3158 gr_mode = SImode;
3159 /* If we have an even 4 byte hunk because the aggregate is a
3160 multiple of 4 bytes in size, then this goes in a GR reg right
3161 adjusted/little endian. */
3162 else if (byte_size - offset == 4)
3163 gr_mode = SImode;
3165 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3166 gen_rtx_REG (gr_mode, (basereg
3167 + int_regs)),
3168 GEN_INT (offset));
3170 gr_size = GET_MODE_SIZE (gr_mode);
3171 offset += gr_size;
3172 if (gr_size == UNITS_PER_WORD
3173 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3174 int_regs++;
3175 else if (gr_size > UNITS_PER_WORD)
3176 int_regs += gr_size / UNITS_PER_WORD;
3178 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3181 /* Integral and aggregates go in general registers. If we have run out of
3182 FR registers, then FP values must also go in general registers. This can
3183 happen when we have a SFmode HFA. */
3184 else if (mode == TFmode || mode == TCmode
3185 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3187 int byte_size = ((mode == BLKmode)
3188 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3189 if (BYTES_BIG_ENDIAN
3190 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3191 && byte_size < UNITS_PER_WORD
3192 && byte_size > 0)
3194 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3195 gen_rtx_REG (DImode,
3196 (basereg + cum->words
3197 + offset)),
3198 const0_rtx);
3199 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3201 else
3202 return gen_rtx_REG (mode, basereg + cum->words + offset);
3206 /* If there is a prototype, then FP values go in a FR register when
3207 named, and in a GR register when unnamed. */
3208 else if (cum->prototype)
3210 if (named)
3211 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3212 /* In big-endian mode, an anonymous SFmode value must be represented
3213 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3214 the value into the high half of the general register. */
3215 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3216 return gen_rtx_PARALLEL (mode,
3217 gen_rtvec (1,
3218 gen_rtx_EXPR_LIST (VOIDmode,
3219 gen_rtx_REG (DImode, basereg + cum->words + offset),
3220 const0_rtx)));
3221 else
3222 return gen_rtx_REG (mode, basereg + cum->words + offset);
3224 /* If there is no prototype, then FP values go in both FR and GR
3225 registers. */
3226 else
3228 /* See comment above. */
3229 enum machine_mode inner_mode =
3230 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3232 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3233 gen_rtx_REG (mode, (FR_ARG_FIRST
3234 + cum->fp_regs)),
3235 const0_rtx);
3236 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3237 gen_rtx_REG (inner_mode,
3238 (basereg + cum->words
3239 + offset)),
3240 const0_rtx);
3242 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3246 /* Return number of words, at the beginning of the argument, that must be
3247 put in registers. 0 if the argument is entirely in registers or entirely
3248 in memory. */
3251 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3252 tree type, int named ATTRIBUTE_UNUSED)
3254 int words = ia64_function_arg_words (type, mode);
3255 int offset = ia64_function_arg_offset (cum, type, words);
3257 /* If all argument slots are used, then it must go on the stack. */
3258 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3259 return 0;
3261 /* It doesn't matter whether the argument goes in FR or GR regs. If
3262 it fits within the 8 argument slots, then it goes entirely in
3263 registers. If it extends past the last argument slot, then the rest
3264 goes on the stack. */
3266 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3267 return 0;
3269 return MAX_ARGUMENT_SLOTS - cum->words - offset;
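/* Example (hypothetical call): with 6 of the 8 argument slots already
   used (cum->words == 6, no alignment skip) and a 32-byte aggregate
   (4 words), 6 + 4 > 8, so the first 8 - 6 = 2 words go in registers
   and the remaining 2 words go on the stack.  */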
3272 /* Update CUM to point after this argument. This is patterned after
3273 ia64_function_arg. */
3275 void
3276 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3277 tree type, int named)
3279 int words = ia64_function_arg_words (type, mode);
3280 int offset = ia64_function_arg_offset (cum, type, words);
3281 enum machine_mode hfa_mode = VOIDmode;
3283 /* If all arg slots are already full, then there is nothing to do. */
3284 if (cum->words >= MAX_ARGUMENT_SLOTS)
3285 return;
3287 cum->words += words + offset;
3289 /* Check for and handle homogeneous FP aggregates. */
3290 if (type)
3291 hfa_mode = hfa_element_mode (type, 0);
3293 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3294 and unprototyped hfas are passed specially. */
3295 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3297 int fp_regs = cum->fp_regs;
3298 /* This is the original value of cum->words + offset. */
3299 int int_regs = cum->words - words;
3300 int hfa_size = GET_MODE_SIZE (hfa_mode);
3301 int byte_size;
3302 int args_byte_size;
3304 /* If prototyped, pass it in FR regs then GR regs.
3305 If not prototyped, pass it in both FR and GR regs.
3307 If this is an SFmode aggregate, then it is possible to run out of
3308 FR regs while GR regs are still left. In that case, we pass the
3309 remaining part in the GR regs. */
3311 /* Fill the FP regs. We do this always. We stop if we reach the end
3312 of the argument, the last FP register, or the last argument slot. */
3314 byte_size = ((mode == BLKmode)
3315 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3316 args_byte_size = int_regs * UNITS_PER_WORD;
3317 offset = 0;
3318 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3319 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3321 offset += hfa_size;
3322 args_byte_size += hfa_size;
3323 fp_regs++;
3326 cum->fp_regs = fp_regs;
3329 /* Integral and aggregates go in general registers. If we have run out of
3330 FR registers, then FP values must also go in general registers. This can
3331 happen when we have a SFmode HFA. */
3332 else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
3333 cum->int_regs = cum->words;
3335 /* If there is a prototype, then FP values go in a FR register when
3336 named, and in a GR register when unnamed. */
3337 else if (cum->prototype)
3339 if (! named)
3340 cum->int_regs = cum->words;
3341 else
3342 /* ??? Complex types should not reach here. */
3343 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3345 /* If there is no prototype, then FP values go in both FR and GR
3346 registers. */
3347 else
3349 /* ??? Complex types should not reach here. */
3350 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3351 cum->int_regs = cum->words;
3355 /* Variable sized types are passed by reference. */
3356 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3358 static bool
3359 ia64_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3360 enum machine_mode mode ATTRIBUTE_UNUSED,
3361 tree type, bool named ATTRIBUTE_UNUSED)
3363 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3366 /* True if it is OK to do sibling call optimization for the specified
3367 call expression EXP. DECL will be the called function, or NULL if
3368 this is an indirect call. */
3369 static bool
3370 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3372 /* We can't perform a sibcall if the current function has the syscall_linkage
3373 attribute. */
3374 if (lookup_attribute ("syscall_linkage",
3375 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
3376 return false;
3378 /* We must always return with our current GP. This means we can
3379 only sibcall to functions defined in the current module. */
3380 return decl && (*targetm.binds_local_p) (decl);
3384 /* Implement va_arg. */
3386 static tree
3387 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3389 /* Variable sized types are passed by reference. */
3390 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
3392 tree ptrtype = build_pointer_type (type);
3393 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3394 return build_va_arg_indirect_ref (addr);
3397 /* Aggregate arguments with alignment larger than 8 bytes start at
3398 the next even boundary. Integer and floating point arguments
3399 do so if they are larger than 8 bytes, whether or not they are
3400 also aligned larger than 8 bytes. */
3401 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3402 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3404 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3405 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
3406 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3407 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
3408 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3409 gimplify_and_add (t, pre_p);
3412 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3415 /* Return 1 if the function return value is returned in memory. Return 0 if it is
3416 in a register. */
3418 static bool
3419 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3421 enum machine_mode mode;
3422 enum machine_mode hfa_mode;
3423 HOST_WIDE_INT byte_size;
3425 mode = TYPE_MODE (valtype);
3426 byte_size = GET_MODE_SIZE (mode);
3427 if (mode == BLKmode)
3429 byte_size = int_size_in_bytes (valtype);
3430 if (byte_size < 0)
3431 return true;
3434 /* HFAs with up to 8 elements are returned in the FP argument registers. */
3436 hfa_mode = hfa_element_mode (valtype, 0);
3437 if (hfa_mode != VOIDmode)
3439 int hfa_size = GET_MODE_SIZE (hfa_mode);
3441 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3442 return true;
3443 else
3444 return false;
3446 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3447 return true;
3448 else
3449 return false;
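/* Examples (illustrative): an HFA of 8 doubles (64 bytes, 8 elements)
   still comes back in FP registers, while an HFA of 9 floats exceeds
   the 8-element limit and is returned in memory.  A non-HFA aggregate
   is returned in memory once it is larger than
   UNITS_PER_WORD * MAX_INT_RETURN_SLOTS bytes; smaller ones come back
   in the general return registers.  */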
3452 /* Return rtx for register that holds the function return value. */
3455 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3457 enum machine_mode mode;
3458 enum machine_mode hfa_mode;
3460 mode = TYPE_MODE (valtype);
3461 hfa_mode = hfa_element_mode (valtype, 0);
3463 if (hfa_mode != VOIDmode)
3465 rtx loc[8];
3466 int i;
3467 int hfa_size;
3468 int byte_size;
3469 int offset;
3471 hfa_size = GET_MODE_SIZE (hfa_mode);
3472 byte_size = ((mode == BLKmode)
3473 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3474 offset = 0;
3475 for (i = 0; offset < byte_size; i++)
3477 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3478 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3479 GEN_INT (offset));
3480 offset += hfa_size;
3482 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3484 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
3485 return gen_rtx_REG (mode, FR_ARG_FIRST);
3486 else
3488 if (BYTES_BIG_ENDIAN
3489 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3491 rtx loc[8];
3492 int offset;
3493 int bytesize;
3494 int i;
3496 offset = 0;
3497 bytesize = int_size_in_bytes (valtype);
3498 for (i = 0; offset < bytesize; i++)
3500 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3501 gen_rtx_REG (DImode,
3502 GR_RET_FIRST + i),
3503 GEN_INT (offset));
3504 offset += UNITS_PER_WORD;
3506 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3508 else
3509 return gen_rtx_REG (mode, GR_RET_FIRST);
3513 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3514 We need to emit DTP-relative relocations. */
3516 void
3517 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3519 if (size != 8)
3520 abort ();
3521 fputs ("\tdata8.ua\t@dtprel(", file);
3522 output_addr_const (file, x);
3523 fputs (")", file);
3526 /* Print a memory address as an operand to reference that memory location. */
3528 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3529 also call this from ia64_print_operand for memory addresses. */
3531 void
3532 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3533 rtx address ATTRIBUTE_UNUSED)
3537 /* Print an operand to an assembler instruction.
3538 C Swap and print a comparison operator.
3539 D Print an FP comparison operator.
3540 E Print 32 - constant, for SImode shifts as extract.
3541 e Print 64 - constant, for DImode rotates.
3542 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3543 a floating point register emitted normally.
3544 I Invert a predicate register by adding 1.
3545 J Select the proper predicate register for a condition.
3546 j Select the inverse predicate register for a condition.
3547 O Append .acq for volatile load.
3548 P Postincrement of a MEM.
3549 Q Append .rel for volatile store.
3550 S Shift amount for shladd instruction.
3551 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3552 for Intel assembler.
3553 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3554 for Intel assembler.
3555 r Print register name, or constant 0 as r0. HP compatibility for
3556 Linux kernel. */
3557 void
3558 ia64_print_operand (FILE * file, rtx x, int code)
3560 const char *str;
3562 switch (code)
3564 case 0:
3565 /* Handled below. */
3566 break;
3568 case 'C':
3570 enum rtx_code c = swap_condition (GET_CODE (x));
3571 fputs (GET_RTX_NAME (c), file);
3572 return;
3575 case 'D':
3576 switch (GET_CODE (x))
3578 case NE:
3579 str = "neq";
3580 break;
3581 case UNORDERED:
3582 str = "unord";
3583 break;
3584 case ORDERED:
3585 str = "ord";
3586 break;
3587 default:
3588 str = GET_RTX_NAME (GET_CODE (x));
3589 break;
3591 fputs (str, file);
3592 return;
3594 case 'E':
3595 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3596 return;
3598 case 'e':
3599 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3600 return;
3602 case 'F':
3603 if (x == CONST0_RTX (GET_MODE (x)))
3604 str = reg_names [FR_REG (0)];
3605 else if (x == CONST1_RTX (GET_MODE (x)))
3606 str = reg_names [FR_REG (1)];
3607 else if (GET_CODE (x) == REG)
3608 str = reg_names [REGNO (x)];
3609 else
3610 abort ();
3611 fputs (str, file);
3612 return;
3614 case 'I':
3615 fputs (reg_names [REGNO (x) + 1], file);
3616 return;
3618 case 'J':
3619 case 'j':
3621 unsigned int regno = REGNO (XEXP (x, 0));
3622 if (GET_CODE (x) == EQ)
3623 regno += 1;
3624 if (code == 'j')
3625 regno ^= 1;
3626 fputs (reg_names [regno], file);
3628 return;
3630 case 'O':
3631 if (MEM_VOLATILE_P (x))
3632 fputs(".acq", file);
3633 return;
3635 case 'P':
3637 HOST_WIDE_INT value;
3639 switch (GET_CODE (XEXP (x, 0)))
3641 default:
3642 return;
3644 case POST_MODIFY:
3645 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
3646 if (GET_CODE (x) == CONST_INT)
3647 value = INTVAL (x);
3648 else if (GET_CODE (x) == REG)
3650 fprintf (file, ", %s", reg_names[REGNO (x)]);
3651 return;
3653 else
3654 abort ();
3655 break;
3657 case POST_INC:
3658 value = GET_MODE_SIZE (GET_MODE (x));
3659 break;
3661 case POST_DEC:
3662 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
3663 break;
3666 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
3667 return;
3670 case 'Q':
3671 if (MEM_VOLATILE_P (x))
3672 fputs(".rel", file);
3673 return;
3675 case 'S':
3676 fprintf (file, "%d", exact_log2 (INTVAL (x)));
3677 return;
3679 case 'T':
3680 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3682 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
3683 return;
3685 break;
3687 case 'U':
3688 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3690 const char *prefix = "0x";
3691 if (INTVAL (x) & 0x80000000)
3693 fprintf (file, "0xffffffff");
3694 prefix = "";
3696 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
3697 return;
3699 break;
3701 case 'r':
3702 /* If this operand is the constant zero, write it as register zero.
3703 Any register, zero, or CONST_INT value is OK here. */
3704 if (GET_CODE (x) == REG)
3705 fputs (reg_names[REGNO (x)], file);
3706 else if (x == CONST0_RTX (GET_MODE (x)))
3707 fputs ("r0", file);
3708 else if (GET_CODE (x) == CONST_INT)
3709 output_addr_const (file, x);
3710 else
3711 output_operand_lossage ("invalid %%r value");
3712 return;
3714 case '+':
3716 const char *which;
3718 /* For conditional branches, returns or calls, substitute
3719 sptk, dptk, dpnt, or spnt for %s. */
3720 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
3721 if (x)
3723 int pred_val = INTVAL (XEXP (x, 0));
3725 /* Guess top and bottom 2% statically predicted, matching the thresholds below. */
3726 if (pred_val < REG_BR_PROB_BASE / 50)
3727 which = ".spnt";
3728 else if (pred_val < REG_BR_PROB_BASE / 2)
3729 which = ".dpnt";
3730 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
3731 which = ".dptk";
3732 else
3733 which = ".sptk";
3735 else if (GET_CODE (current_output_insn) == CALL_INSN)
3736 which = ".sptk";
3737 else
3738 which = ".dptk";
3740 fputs (which, file);
3741 return;
3744 case ',':
3745 x = current_insn_predicate;
3746 if (x)
3748 unsigned int regno = REGNO (XEXP (x, 0));
3749 if (GET_CODE (x) == EQ)
3750 regno += 1;
3751 fprintf (file, "(%s) ", reg_names [regno]);
3753 return;
3755 default:
3756 output_operand_lossage ("ia64_print_operand: unknown code");
3757 return;
3760 switch (GET_CODE (x))
3762 /* This happens for the spill/restore instructions. */
3763 case POST_INC:
3764 case POST_DEC:
3765 case POST_MODIFY:
3766 x = XEXP (x, 0);
3767 /* ... fall through ... */
3769 case REG:
3770 fputs (reg_names [REGNO (x)], file);
3771 break;
3773 case MEM:
3775 rtx addr = XEXP (x, 0);
3776 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3777 addr = XEXP (addr, 0);
3778 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
3779 break;
3782 default:
3783 output_addr_const (file, x);
3784 break;
3787 return;
3790 /* Compute a (partial) cost for rtx X. Return true if the complete
3791 cost has been computed, and false if subexpressions should be
3792 scanned. In either case, *TOTAL contains the cost result. */
3793 /* ??? This is incomplete. */
3795 static bool
3796 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
3798 switch (code)
3800 case CONST_INT:
3801 switch (outer_code)
3803 case SET:
3804 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
3805 return true;
3806 case PLUS:
3807 if (CONST_OK_FOR_I (INTVAL (x)))
3808 *total = 0;
3809 else if (CONST_OK_FOR_J (INTVAL (x)))
3810 *total = 1;
3811 else
3812 *total = COSTS_N_INSNS (1);
3813 return true;
3814 default:
3815 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
3816 *total = 0;
3817 else
3818 *total = COSTS_N_INSNS (1);
3819 return true;
3822 case CONST_DOUBLE:
3823 *total = COSTS_N_INSNS (1);
3824 return true;
3826 case CONST:
3827 case SYMBOL_REF:
3828 case LABEL_REF:
3829 *total = COSTS_N_INSNS (3);
3830 return true;
3832 case MULT:
3833 /* For multiplies wider than HImode, we have to go to the FPU,
3834 which normally involves copies. Plus there's the latency
3835 of the multiply itself, and the latency of the instructions to
3836 transfer integer regs to FP regs. */
3837 /* ??? Check for FP mode. */
3838 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
3839 *total = COSTS_N_INSNS (10);
3840 else
3841 *total = COSTS_N_INSNS (2);
3842 return true;
3844 case PLUS:
3845 case MINUS:
3846 case ASHIFT:
3847 case ASHIFTRT:
3848 case LSHIFTRT:
3849 *total = COSTS_N_INSNS (1);
3850 return true;
3852 case DIV:
3853 case UDIV:
3854 case MOD:
3855 case UMOD:
3856 /* We make divide expensive, so that divide-by-constant will be
3857 optimized to a multiply. */
3858 *total = COSTS_N_INSNS (60);
3859 return true;
3861 default:
3862 return false;
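/* To illustrate the point of the large DIV/UDIV/MOD/UMOD cost above:
   at 60 insn equivalents, a division by a constant such as x / 10 is
   expected to be expanded by the middle end into a multiply by a
   reciprocal constant plus shifts, which these costs make look cheap
   by comparison.  */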
3866 /* Calculate the cost of moving data from a register in class FROM to
3867 one in class TO, using MODE. */
3869 int
3870 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
3871 enum reg_class to)
3873 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
3874 if (to == ADDL_REGS)
3875 to = GR_REGS;
3876 if (from == ADDL_REGS)
3877 from = GR_REGS;
3879 /* All costs are symmetric, so reduce cases by putting the
3880 lower number class as the destination. */
3881 if (from < to)
3883 enum reg_class tmp = to;
3884 to = from, from = tmp;
3887 /* Moving from FR<->GR in XFmode must be more expensive than 2,
3888 so that we get secondary memory reloads. Between FR_REGS,
3889 we have to make this at least as expensive as MEMORY_MOVE_COST
3890 to avoid spectacularly poor register class preferencing. */
3891 if (mode == XFmode)
3893 if (to != GR_REGS || from != GR_REGS)
3894 return MEMORY_MOVE_COST (mode, to, 0);
3895 else
3896 return 3;
3899 switch (to)
3901 case PR_REGS:
3902 /* Moving between PR registers takes two insns. */
3903 if (from == PR_REGS)
3904 return 3;
3905 /* Moving between PR and anything but GR is impossible. */
3906 if (from != GR_REGS)
3907 return MEMORY_MOVE_COST (mode, to, 0);
3908 break;
3910 case BR_REGS:
3911 /* Moving between BR and anything but GR is impossible. */
3912 if (from != GR_REGS && from != GR_AND_BR_REGS)
3913 return MEMORY_MOVE_COST (mode, to, 0);
3914 break;
3916 case AR_I_REGS:
3917 case AR_M_REGS:
3918 /* Moving between AR and anything but GR is impossible. */
3919 if (from != GR_REGS)
3920 return MEMORY_MOVE_COST (mode, to, 0);
3921 break;
3923 case GR_REGS:
3924 case FR_REGS:
3925 case GR_AND_FR_REGS:
3926 case GR_AND_BR_REGS:
3927 case ALL_REGS:
3928 break;
3930 default:
3931 abort ();
3934 return 2;
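/* Some illustrative outcomes of the cost function above (not an
   exhaustive table): GR<->GR and GR<->FR moves in DImode cost 2,
   PR<->PR costs 3, and any XFmode move other than GR<->GR is priced
   at MEMORY_MOVE_COST so that reload goes through memory.  */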
3937 /* This function returns the register class required for a secondary
3938 register when copying between one of the registers in CLASS, and X,
3939 using MODE. A return value of NO_REGS means that no secondary register
3940 is required. */
3942 enum reg_class
3943 ia64_secondary_reload_class (enum reg_class class,
3944 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3946 int regno = -1;
3948 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3949 regno = true_regnum (x);
3951 switch (class)
3953 case BR_REGS:
3954 case AR_M_REGS:
3955 case AR_I_REGS:
3956 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
3957 interaction. We end up with two pseudos with overlapping lifetimes
3958 both of which are equiv to the same constant, and both which need
3959 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
3960 changes depending on the path length, which means the qty_first_reg
3961 check in make_regs_eqv can give different answers at different times.
3962 At some point I'll probably need a reload_indi pattern to handle
3963 this.
3965 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
3966 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
3967 non-general registers for good measure. */
3968 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
3969 return GR_REGS;
3971 /* This is needed if a pseudo used as a call_operand gets spilled to a
3972 stack slot. */
3973 if (GET_CODE (x) == MEM)
3974 return GR_REGS;
3975 break;
3977 case FR_REGS:
3978 /* Need to go through general registers to get to other class regs. */
3979 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
3980 return GR_REGS;
3982 /* This can happen when a paradoxical subreg is an operand to the
3983 muldi3 pattern. */
3984 /* ??? This shouldn't be necessary after instruction scheduling is
3985 enabled, because paradoxical subregs are not accepted by
3986 register_operand when INSN_SCHEDULING is defined. Or alternatively,
3987 stop the paradoxical subreg stupidity in the *_operand functions
3988 in recog.c. */
3989 if (GET_CODE (x) == MEM
3990 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
3991 || GET_MODE (x) == QImode))
3992 return GR_REGS;
3994 /* This can happen because of the ior/and/etc patterns that accept FP
3995 registers as operands. If the third operand is a constant, then it
3996 needs to be reloaded into a FP register. */
3997 if (GET_CODE (x) == CONST_INT)
3998 return GR_REGS;
4000 /* This can happen because of register elimination in a muldi3 insn.
4001 E.g. `26107 * (unsigned long)&u'. */
4002 if (GET_CODE (x) == PLUS)
4003 return GR_REGS;
4004 break;
4006 case PR_REGS:
4007 /* ??? This happens if we cse/gcse a BImode value across a call,
4008 and the function has a nonlocal goto. This is because global
4009 does not allocate call crossing pseudos to hard registers when
4010 current_function_has_nonlocal_goto is true. This is relatively
4011 common for C++ programs that use exceptions. To reproduce,
4012 return NO_REGS and compile libstdc++. */
4013 if (GET_CODE (x) == MEM)
4014 return GR_REGS;
4016 /* This can happen when we take a BImode subreg of a DImode value,
4017 and that DImode value winds up in some non-GR register. */
4018 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4019 return GR_REGS;
4020 break;
4022 default:
4023 break;
4026 return NO_REGS;
4030 /* Emit text to declare externally defined variables and functions, because
4031 the Intel assembler does not support undefined externals. */
4033 void
4034 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4036 int save_referenced;
4038 /* GNU as does not need anything here, but the HP linker does need
4039 something for external functions. */
4041 if (TARGET_GNU_AS
4042 && (!TARGET_HPUX_LD
4043 || TREE_CODE (decl) != FUNCTION_DECL
4044 || strstr (name, "__builtin_") == name))
4045 return;
4047 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4048 the linker when we do this, so we need to be careful not to do this for
4049 builtin functions which have no library equivalent. Unfortunately, we
4050 can't tell here whether or not a function will actually be called by
4051 expand_expr, so we pull in library functions even if we may not need
4052 them later. */
4053 if (! strcmp (name, "__builtin_next_arg")
4054 || ! strcmp (name, "alloca")
4055 || ! strcmp (name, "__builtin_constant_p")
4056 || ! strcmp (name, "__builtin_args_info"))
4057 return;
4059 if (TARGET_HPUX_LD)
4060 ia64_hpux_add_extern_decl (decl);
4061 else
4063 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4064 restore it. */
4065 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4066 if (TREE_CODE (decl) == FUNCTION_DECL)
4067 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4068 (*targetm.asm_out.globalize_label) (file, name);
4069 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4073 /* Parse the -mfixed-range= option string. */
4075 static void
4076 fix_range (const char *const_str)
4078 int i, first, last;
4079 char *str, *dash, *comma;
4081 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4082 REG2 are either register names or register numbers. The effect
4083 of this option is to mark the registers in the range from REG1 to
4084 REG2 as ``fixed'' so they won't be used by the compiler. This is
4085 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
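/* For example (the second form is purely hypothetical):

       -mfixed-range=f32-f127
       -mfixed-range=f32-f127,f12-f15

   Every register in each range is marked both fixed and call-used by
   the loop below.  */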
4087 i = strlen (const_str);
4088 str = (char *) alloca (i + 1);
4089 memcpy (str, const_str, i + 1);
4091 while (1)
4093 dash = strchr (str, '-');
4094 if (!dash)
4096 warning ("value of -mfixed-range must have form REG1-REG2");
4097 return;
4099 *dash = '\0';
4101 comma = strchr (dash + 1, ',');
4102 if (comma)
4103 *comma = '\0';
4105 first = decode_reg_name (str);
4106 if (first < 0)
4108 warning ("unknown register name: %s", str);
4109 return;
4112 last = decode_reg_name (dash + 1);
4113 if (last < 0)
4115 warning ("unknown register name: %s", dash + 1);
4116 return;
4119 *dash = '-';
4121 if (first > last)
4123 warning ("%s-%s is an empty range", str, dash + 1);
4124 return;
4127 for (i = first; i <= last; ++i)
4128 fixed_regs[i] = call_used_regs[i] = 1;
4130 if (!comma)
4131 break;
4133 *comma = ',';
4134 str = comma + 1;
4138 static struct machine_function *
4139 ia64_init_machine_status (void)
4141 return ggc_alloc_cleared (sizeof (struct machine_function));
4144 /* Handle TARGET_OPTIONS switches. */
4146 void
4147 ia64_override_options (void)
4149 static struct pta
4151 const char *const name; /* processor name or nickname. */
4152 const enum processor_type processor;
4154 const processor_alias_table[] =
4156 {"itanium", PROCESSOR_ITANIUM},
4157 {"itanium1", PROCESSOR_ITANIUM},
4158 {"merced", PROCESSOR_ITANIUM},
4159 {"itanium2", PROCESSOR_ITANIUM2},
4160 {"mckinley", PROCESSOR_ITANIUM2},
4163 int const pta_size = ARRAY_SIZE (processor_alias_table);
4164 int i;
4166 if (TARGET_AUTO_PIC)
4167 target_flags |= MASK_CONST_GP;
4169 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4171 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4172 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4174 warning ("cannot optimize floating point division for both latency and throughput");
4175 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4177 else
4179 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4180 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4181 else
4182 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4186 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4188 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4189 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4191 warning ("cannot optimize integer division for both latency and throughput");
4192 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4194 else
4196 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4197 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4198 else
4199 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4203 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4205 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4206 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4208 warning ("cannot optimize square root for both latency and throughput");
4209 target_flags &= ~MASK_INLINE_SQRT_THR;
4211 else
4213 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4214 target_flags &= ~MASK_INLINE_SQRT_LAT;
4215 else
4216 target_flags &= ~MASK_INLINE_SQRT_THR;
4220 if (TARGET_INLINE_SQRT_LAT)
4222 warning ("not yet implemented: latency-optimized inline square root");
4223 target_flags &= ~MASK_INLINE_SQRT_LAT;
4226 if (ia64_fixed_range_string)
4227 fix_range (ia64_fixed_range_string);
4229 if (ia64_tls_size_string)
4231 char *end;
4232 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4233 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4234 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4235 else
4236 ia64_tls_size = tmp;
4239 if (!ia64_tune_string)
4240 ia64_tune_string = "itanium2";
4242 for (i = 0; i < pta_size; i++)
4243 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4245 ia64_tune = processor_alias_table[i].processor;
4246 break;
4249 if (i == pta_size)
4250 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4252 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4253 flag_schedule_insns_after_reload = 0;
4255 /* Variable tracking should be run after all optimizations which change order
4256 of insns. It also needs a valid CFG. */
4257 ia64_flag_var_tracking = flag_var_tracking;
4258 flag_var_tracking = 0;
4260 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4262 init_machine_status = ia64_init_machine_status;
4265 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4266 static enum attr_type ia64_safe_type (rtx);
4268 static enum attr_itanium_class
4269 ia64_safe_itanium_class (rtx insn)
4271 if (recog_memoized (insn) >= 0)
4272 return get_attr_itanium_class (insn);
4273 else
4274 return ITANIUM_CLASS_UNKNOWN;
4277 static enum attr_type
4278 ia64_safe_type (rtx insn)
4280 if (recog_memoized (insn) >= 0)
4281 return get_attr_type (insn);
4282 else
4283 return TYPE_UNKNOWN;
4286 /* The following collection of routines emit instruction group stop bits as
4287 necessary to avoid dependencies. */
4289 /* Need to track some additional registers as far as serialization is
4290 concerned so we can properly handle br.call and br.ret. We could
4291 make these registers visible to gcc, but since these registers are
4292 never explicitly used in gcc generated code, it seems wasteful to
4293 do so (plus it would make the call and return patterns needlessly
4294 complex). */
4295 #define REG_RP (BR_REG (0))
4296 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4297 /* This is used for volatile asms which may require a stop bit immediately
4298 before and after them. */
4299 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4300 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4301 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4303 /* For each register, we keep track of how it has been written in the
4304 current instruction group.
4306 If a register is written unconditionally (no qualifying predicate),
4307 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4309 If a register is written if its qualifying predicate P is true, we
4310 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4311 may be written again by the complement of P (P^1) and when this happens,
4312 WRITE_COUNT gets set to 2.
4314 The result of this is that whenever an insn attempts to write a register
4315 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4317 If a predicate register is written by a floating-point insn, we set
4318 WRITTEN_BY_FP to true.
4320 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4321 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
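/* A hypothetical group, purely to illustrate the rules above (p6 and
   p7 are assumed to form a complementary predicate pair):

       (p6) mov r8 = 1    // write_count for r8 becomes 1, first_pred is p6
       (p7) mov r8 = 2    // complement of p6, allowed; write_count becomes 2
            mov r8 = 3    // unconditional write of an already written
                          // register, so a stop bit is required first.  */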
4323 struct reg_write_state
4325 unsigned int write_count : 2;
4326 unsigned int first_pred : 16;
4327 unsigned int written_by_fp : 1;
4328 unsigned int written_by_and : 1;
4329 unsigned int written_by_or : 1;
4332 /* Cumulative info for the current instruction group. */
4333 struct reg_write_state rws_sum[NUM_REGS];
4334 /* Info for the current instruction. This gets copied to rws_sum after a
4335 stop bit is emitted. */
4336 struct reg_write_state rws_insn[NUM_REGS];
4338 /* Indicates whether this is the first instruction after a stop bit,
4339 in which case we don't need another stop bit. Without this, we hit
4340 the abort in ia64_variable_issue when scheduling an alloc. */
4341 static int first_instruction;
4343 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4344 RTL for one instruction. */
4345 struct reg_flags
4347 unsigned int is_write : 1; /* Is register being written? */
4348 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4349 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4350 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4351 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4352 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4355 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4356 static int rws_access_regno (int, struct reg_flags, int);
4357 static int rws_access_reg (rtx, struct reg_flags, int);
4358 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4359 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4360 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4361 static void init_insn_group_barriers (void);
4362 static int group_barrier_needed_p (rtx);
4363 static int safe_group_barrier_needed_p (rtx);
4365 /* Update *RWS for REGNO, which is being written by the current instruction,
4366 with predicate PRED, and associated register flags in FLAGS. */
4368 static void
4369 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4371 if (pred)
4372 rws[regno].write_count++;
4373 else
4374 rws[regno].write_count = 2;
4375 rws[regno].written_by_fp |= flags.is_fp;
4376 /* ??? Not tracking and/or across differing predicates. */
4377 rws[regno].written_by_and = flags.is_and;
4378 rws[regno].written_by_or = flags.is_or;
4379 rws[regno].first_pred = pred;
4382 /* Handle an access to register REGNO of type FLAGS using predicate register
4383 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4384 a dependency with an earlier instruction in the same group. */
4386 static int
4387 rws_access_regno (int regno, struct reg_flags flags, int pred)
4389 int need_barrier = 0;
4391 if (regno >= NUM_REGS)
4392 abort ();
4394 if (! PR_REGNO_P (regno))
4395 flags.is_and = flags.is_or = 0;
4397 if (flags.is_write)
4399 int write_count;
4401 /* One insn writes same reg multiple times? */
4402 if (rws_insn[regno].write_count > 0)
4403 abort ();
4405 /* Update info for current instruction. */
4406 rws_update (rws_insn, regno, flags, pred);
4407 write_count = rws_sum[regno].write_count;
4409 switch (write_count)
4411 case 0:
4412 /* The register has not been written yet. */
4413 rws_update (rws_sum, regno, flags, pred);
4414 break;
4416 case 1:
4417 /* The register has been written via a predicate. If this is
4418 not a complementary predicate, then we need a barrier. */
4419 /* ??? This assumes that P and P+1 are always complementary
4420 predicates for P even. */
4421 if (flags.is_and && rws_sum[regno].written_by_and)
4423 else if (flags.is_or && rws_sum[regno].written_by_or)
4425 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4426 need_barrier = 1;
4427 rws_update (rws_sum, regno, flags, pred);
4428 break;
4430 case 2:
4431 /* The register has been unconditionally written already. We
4432 need a barrier. */
4433 if (flags.is_and && rws_sum[regno].written_by_and)
4435 else if (flags.is_or && rws_sum[regno].written_by_or)
4437 else
4438 need_barrier = 1;
4439 rws_sum[regno].written_by_and = flags.is_and;
4440 rws_sum[regno].written_by_or = flags.is_or;
4441 break;
4443 default:
4444 abort ();
4447 else
4449 if (flags.is_branch)
4451 /* Branches have several RAW exceptions that let us avoid
4452 barriers. */
4454 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4455 /* RAW dependencies on branch regs are permissible as long
4456 as the writer is a non-branch instruction. Since we
4457 never generate code that uses a branch register written
4458 by a branch instruction, handling this case is
4459 easy. */
4460 return 0;
4462 if (REGNO_REG_CLASS (regno) == PR_REGS
4463 && ! rws_sum[regno].written_by_fp)
4464 /* The predicates of a branch are available within the
4465 same insn group as long as the predicate was written by
4466 something other than a floating-point instruction. */
4467 return 0;
4470 if (flags.is_and && rws_sum[regno].written_by_and)
4471 return 0;
4472 if (flags.is_or && rws_sum[regno].written_by_or)
4473 return 0;
4475 switch (rws_sum[regno].write_count)
4477 case 0:
4478 /* The register has not been written yet. */
4479 break;
4481 case 1:
4482 /* The register has been written via a predicate. If this is
4483 not a complementary predicate, then we need a barrier. */
4484 /* ??? This assumes that P and P+1 are always complementary
4485 predicates for P even. */
4486 if ((rws_sum[regno].first_pred ^ 1) != pred)
4487 need_barrier = 1;
4488 break;
4490 case 2:
4491 /* The register has been unconditionally written already. We
4492 need a barrier. */
4493 need_barrier = 1;
4494 break;
4496 default:
4497 abort ();
4501 return need_barrier;
4504 static int
4505 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4507 int regno = REGNO (reg);
4508 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4510 if (n == 1)
4511 return rws_access_regno (regno, flags, pred);
4512 else
4514 int need_barrier = 0;
4515 while (--n >= 0)
4516 need_barrier |= rws_access_regno (regno + n, flags, pred);
4517 return need_barrier;
4521 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4522 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4524 static void
4525 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4527 rtx src = SET_SRC (x);
4529 *pcond = 0;
4531 switch (GET_CODE (src))
4533 case CALL:
4534 return;
4536 case IF_THEN_ELSE:
4537 if (SET_DEST (x) == pc_rtx)
4538 /* X is a conditional branch. */
4539 return;
4540 else
4542 int is_complemented = 0;
4544 /* X is a conditional move. */
4545 rtx cond = XEXP (src, 0);
4546 if (GET_CODE (cond) == EQ)
4547 is_complemented = 1;
4548 cond = XEXP (cond, 0);
4549 if (GET_CODE (cond) != REG
4550 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4551 abort ();
4552 *pcond = cond;
4553 if (XEXP (src, 1) == SET_DEST (x)
4554 || XEXP (src, 2) == SET_DEST (x))
4556 /* X is a conditional move that conditionally writes the
4557 destination. */
4559 /* We need another complement in this case. */
4560 if (XEXP (src, 1) == SET_DEST (x))
4561 is_complemented = ! is_complemented;
4563 *ppred = REGNO (cond);
4564 if (is_complemented)
4565 ++*ppred;
4568 /* ??? If this is a conditional write to the dest, then this
4569 instruction does not actually read one source. This probably
4570 doesn't matter, because that source is also the dest. */
4571 /* ??? Multiple writes to predicate registers are allowed
4572 if they are all AND type compares, or if they are all OR
4573 type compares. We do not generate such instructions
4574 currently. */
4576 /* ... fall through ... */
4578 default:
4579 if (COMPARISON_P (src)
4580 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
4581 /* Set pflags->is_fp to 1 so that we know we're dealing
4582 with a floating point comparison when processing the
4583 destination of the SET. */
4584 pflags->is_fp = 1;
4586 /* Discover if this is a parallel comparison. We only handle
4587 and.orcm and or.andcm at present, since we must retain a
4588 strict inverse on the predicate pair. */
4589 else if (GET_CODE (src) == AND)
4590 pflags->is_and = 1;
4591 else if (GET_CODE (src) == IOR)
4592 pflags->is_or = 1;
4594 break;
4598 /* Subroutine of rtx_needs_barrier; this function determines whether the
4599 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
4600 are as in rtx_needs_barrier. COND is an rtx that holds the condition
4601 for this insn. */
4603 static int
4604 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
4606 int need_barrier = 0;
4607 rtx dst;
4608 rtx src = SET_SRC (x);
4610 if (GET_CODE (src) == CALL)
4611 /* We don't need to worry about the result registers that
4612 get written by subroutine call. */
4613 return rtx_needs_barrier (src, flags, pred);
4614 else if (SET_DEST (x) == pc_rtx)
4616 /* X is a conditional branch. */
4617 /* ??? This seems redundant, as the caller sets this bit for
4618 all JUMP_INSNs. */
4619 flags.is_branch = 1;
4620 return rtx_needs_barrier (src, flags, pred);
4623 need_barrier = rtx_needs_barrier (src, flags, pred);
4625 /* This instruction unconditionally uses a predicate register. */
4626 if (cond)
4627 need_barrier |= rws_access_reg (cond, flags, 0);
4629 dst = SET_DEST (x);
4630 if (GET_CODE (dst) == ZERO_EXTRACT)
4632 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
4633 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
4634 dst = XEXP (dst, 0);
4636 return need_barrier;
4639 /* Handle an access to rtx X of type FLAGS using predicate register
4640 PRED. Return 1 if this access creates a dependency with an earlier
4641 instruction in the same group. */
4643 static int
4644 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
4646 int i, j;
4647 int is_complemented = 0;
4648 int need_barrier = 0;
4649 const char *format_ptr;
4650 struct reg_flags new_flags;
4651 rtx cond = 0;
4653 if (! x)
4654 return 0;
4656 new_flags = flags;
4658 switch (GET_CODE (x))
4660 case SET:
4661 update_set_flags (x, &new_flags, &pred, &cond);
4662 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
4663 if (GET_CODE (SET_SRC (x)) != CALL)
4665 new_flags.is_write = 1;
4666 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
4668 break;
4670 case CALL:
4671 new_flags.is_write = 0;
4672 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4674 /* Avoid multiple register writes, in case this is a pattern with
4675 multiple CALL rtx. This avoids an abort in rws_access_reg. */
4676 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
4678 new_flags.is_write = 1;
4679 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
4680 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
4681 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4683 break;
4685 case COND_EXEC:
4686 /* X is a predicated instruction. */
4688 cond = COND_EXEC_TEST (x);
4689 if (pred)
4690 abort ();
4691 need_barrier = rtx_needs_barrier (cond, flags, 0);
4693 if (GET_CODE (cond) == EQ)
4694 is_complemented = 1;
4695 cond = XEXP (cond, 0);
4696 if (GET_CODE (cond) != REG
4697 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4698 abort ();
4699 pred = REGNO (cond);
4700 if (is_complemented)
4701 ++pred;
4703 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
4704 return need_barrier;
4706 case CLOBBER:
4707 case USE:
4708 /* Clobber & use are for earlier compiler-phases only. */
4709 break;
4711 case ASM_OPERANDS:
4712 case ASM_INPUT:
4713 /* We always emit stop bits for traditional asms. We emit stop bits
4714 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
4715 if (GET_CODE (x) != ASM_OPERANDS
4716 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
4718 /* Avoid writing the register multiple times if we have multiple
4719 asm outputs. This avoids an abort in rws_access_reg. */
4720 if (! rws_insn[REG_VOLATILE].write_count)
4722 new_flags.is_write = 1;
4723 rws_access_regno (REG_VOLATILE, new_flags, pred);
4725 return 1;
4728 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
4729 We cannot just fall through here since then we would be confused
4730 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
4731 usage, does not indicate a traditional asm. */
4733 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
4734 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
4735 need_barrier = 1;
4736 break;
4738 case PARALLEL:
4739 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4741 rtx pat = XVECEXP (x, 0, i);
4742 if (GET_CODE (pat) == SET)
4744 update_set_flags (pat, &new_flags, &pred, &cond);
4745 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
4747 else if (GET_CODE (pat) == USE
4748 || GET_CODE (pat) == CALL
4749 || GET_CODE (pat) == ASM_OPERANDS)
4750 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4751 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
4752 abort ();
4754 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4756 rtx pat = XVECEXP (x, 0, i);
4757 if (GET_CODE (pat) == SET)
4759 if (GET_CODE (SET_SRC (pat)) != CALL)
4761 new_flags.is_write = 1;
4762 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
4763 pred);
4766 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
4767 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4769 break;
4771 case SUBREG:
4772 x = SUBREG_REG (x);
4773 /* FALLTHRU */
4774 case REG:
4775 if (REGNO (x) == AR_UNAT_REGNUM)
4777 for (i = 0; i < 64; ++i)
4778 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
4780 else
4781 need_barrier = rws_access_reg (x, flags, pred);
4782 break;
4784 case MEM:
4785 /* Find the regs used in memory address computation. */
4786 new_flags.is_write = 0;
4787 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4788 break;
4790 case CONST_INT: case CONST_DOUBLE:
4791 case SYMBOL_REF: case LABEL_REF: case CONST:
4792 break;
4794 /* Operators with side-effects. */
4795 case POST_INC: case POST_DEC:
4796 if (GET_CODE (XEXP (x, 0)) != REG)
4797 abort ();
4799 new_flags.is_write = 0;
4800 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4801 new_flags.is_write = 1;
4802 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4803 break;
4805 case POST_MODIFY:
4806 if (GET_CODE (XEXP (x, 0)) != REG)
4807 abort ();
4809 new_flags.is_write = 0;
4810 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4811 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4812 new_flags.is_write = 1;
4813 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4814 break;
4816 /* Handle common unary and binary ops for efficiency. */
4817 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
4818 case MOD: case UDIV: case UMOD: case AND: case IOR:
4819 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
4820 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
4821 case NE: case EQ: case GE: case GT: case LE:
4822 case LT: case GEU: case GTU: case LEU: case LTU:
4823 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4824 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4825 break;
4827 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
4828 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
4829 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
4830 case SQRT: case FFS: case POPCOUNT:
4831 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
4832 break;
4834 case UNSPEC:
4835 switch (XINT (x, 1))
4837 case UNSPEC_LTOFF_DTPMOD:
4838 case UNSPEC_LTOFF_DTPREL:
4839 case UNSPEC_DTPREL:
4840 case UNSPEC_LTOFF_TPREL:
4841 case UNSPEC_TPREL:
4842 case UNSPEC_PRED_REL_MUTEX:
4843 case UNSPEC_PIC_CALL:
4844 case UNSPEC_MF:
4845 case UNSPEC_FETCHADD_ACQ:
4846 case UNSPEC_BSP_VALUE:
4847 case UNSPEC_FLUSHRS:
4848 case UNSPEC_BUNDLE_SELECTOR:
4849 break;
4851 case UNSPEC_GR_SPILL:
4852 case UNSPEC_GR_RESTORE:
4854 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
4855 HOST_WIDE_INT bit = (offset >> 3) & 63;
4857 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4858 new_flags.is_write = (XINT (x, 1) == 1);
4859 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
4860 new_flags, pred);
4861 break;
4864 case UNSPEC_FR_SPILL:
4865 case UNSPEC_FR_RESTORE:
4866 case UNSPEC_GETF_EXP:
4867 case UNSPEC_SETF_EXP:
4868 case UNSPEC_ADDP4:
4869 case UNSPEC_FR_SQRT_RECIP_APPROX:
4870 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4871 break;
4873 case UNSPEC_FR_RECIP_APPROX:
4874 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4875 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4876 break;
4878 case UNSPEC_CMPXCHG_ACQ:
4879 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4880 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
4881 break;
4883 default:
4884 abort ();
4886 break;
4888 case UNSPEC_VOLATILE:
4889 switch (XINT (x, 1))
4891 case UNSPECV_ALLOC:
4892 /* Alloc must always be the first instruction of a group.
4893 We force this by always returning true. */
4894 /* ??? We might get better scheduling if we explicitly check for
4895 input/local/output register dependencies, and modify the
4896 scheduler so that alloc is always reordered to the start of
4897 the current group. We could then eliminate all of the
4898 first_instruction code. */
4899 rws_access_regno (AR_PFS_REGNUM, flags, pred);
4901 new_flags.is_write = 1;
4902 rws_access_regno (REG_AR_CFM, new_flags, pred);
4903 return 1;
4905 case UNSPECV_SET_BSP:
4906 need_barrier = 1;
4907 break;
4909 case UNSPECV_BLOCKAGE:
4910 case UNSPECV_INSN_GROUP_BARRIER:
4911 case UNSPECV_BREAK:
4912 case UNSPECV_PSAC_ALL:
4913 case UNSPECV_PSAC_NORMAL:
4914 return 0;
4916 default:
4917 abort ();
4919 break;
4921 case RETURN:
4922 new_flags.is_write = 0;
4923 need_barrier = rws_access_regno (REG_RP, flags, pred);
4924 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
4926 new_flags.is_write = 1;
4927 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4928 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4929 break;
4931 default:
4932 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
4933 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4934 switch (format_ptr[i])
4936 case '0': /* unused field */
4937 case 'i': /* integer */
4938 case 'n': /* note */
4939 case 'w': /* wide integer */
4940 case 's': /* pointer to string */
4941 case 'S': /* optional pointer to string */
4942 break;
4944 case 'e':
4945 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
4946 need_barrier = 1;
4947 break;
4949 case 'E':
4950 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
4951 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
4952 need_barrier = 1;
4953 break;
4955 default:
4956 abort ();
4958 break;
4960 return need_barrier;
4963 /* Clear out the state for group_barrier_needed_p at the start of a
4964 sequence of insns. */
4966 static void
4967 init_insn_group_barriers (void)
4969 memset (rws_sum, 0, sizeof (rws_sum));
4970 first_instruction = 1;
4973 /* Given the current state, recorded by previous calls to this function,
4974 determine whether a group barrier (a stop bit) is necessary before INSN.
4975 Return nonzero if so. */
4977 static int
4978 group_barrier_needed_p (rtx insn)
4980 rtx pat;
4981 int need_barrier = 0;
4982 struct reg_flags flags;
4984 memset (&flags, 0, sizeof (flags));
4985 switch (GET_CODE (insn))
4987 case NOTE:
4988 break;
4990 case BARRIER:
4991 /* A barrier doesn't imply an instruction group boundary. */
4992 break;
4994 case CODE_LABEL:
4995 memset (rws_insn, 0, sizeof (rws_insn));
4996 return 1;
4998 case CALL_INSN:
4999 flags.is_branch = 1;
5000 flags.is_sibcall = SIBLING_CALL_P (insn);
5001 memset (rws_insn, 0, sizeof (rws_insn));
5003 /* Don't bundle a call following another call. */
5004 if ((pat = prev_active_insn (insn))
5005 && GET_CODE (pat) == CALL_INSN)
5007 need_barrier = 1;
5008 break;
5011 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5012 break;
5014 case JUMP_INSN:
5015 flags.is_branch = 1;
5017 /* Don't bundle a jump following a call. */
5018 if ((pat = prev_active_insn (insn))
5019 && GET_CODE (pat) == CALL_INSN)
5021 need_barrier = 1;
5022 break;
5024 /* FALLTHRU */
5026 case INSN:
5027 if (GET_CODE (PATTERN (insn)) == USE
5028 || GET_CODE (PATTERN (insn)) == CLOBBER)
5029 /* Don't care about USE and CLOBBER "insns"---those are used to
5030 indicate to the optimizer that it shouldn't get rid of
5031 certain operations. */
5032 break;
5034 pat = PATTERN (insn);
5036 /* Ug. Hack hacks hacked elsewhere. */
5037 switch (recog_memoized (insn))
5039 /* We play dependency tricks with the epilogue in order
5040 to get proper schedules. Undo this for dv analysis. */
5041 case CODE_FOR_epilogue_deallocate_stack:
5042 case CODE_FOR_prologue_allocate_stack:
5043 pat = XVECEXP (pat, 0, 0);
5044 break;
5046 /* The pattern we use for br.cloop confuses the code above.
5047 The second element of the vector is representative. */
5048 case CODE_FOR_doloop_end_internal:
5049 pat = XVECEXP (pat, 0, 1);
5050 break;
5052 /* Doesn't generate code. */
5053 case CODE_FOR_pred_rel_mutex:
5054 case CODE_FOR_prologue_use:
5055 return 0;
5057 default:
5058 break;
5061 memset (rws_insn, 0, sizeof (rws_insn));
5062 need_barrier = rtx_needs_barrier (pat, flags, 0);
5064 /* Check to see if the previous instruction was a volatile
5065 asm. */
5066 if (! need_barrier)
5067 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5068 break;
5070 default:
5071 abort ();
5074 if (first_instruction && INSN_P (insn)
5075 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5076 && GET_CODE (PATTERN (insn)) != USE
5077 && GET_CODE (PATTERN (insn)) != CLOBBER)
5079 need_barrier = 0;
5080 first_instruction = 0;
5083 return need_barrier;
5086 /* Like group_barrier_needed_p, but do not clobber the current state. */
5088 static int
5089 safe_group_barrier_needed_p (rtx insn)
5091 struct reg_write_state rws_saved[NUM_REGS];
5092 int saved_first_instruction;
5093 int t;
5095 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5096 saved_first_instruction = first_instruction;
5098 t = group_barrier_needed_p (insn);
5100 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5101 first_instruction = saved_first_instruction;
5103 return t;
5106 /* Scan the current function and insert stop bits as necessary to
5107 eliminate dependencies. This function assumes that a final
5108 instruction scheduling pass has been run which has already
5109 inserted most of the necessary stop bits. This function only
5110 inserts new ones at basic block boundaries, since these are
5111 invisible to the scheduler. */
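/* In the generated assembly a stop bit appears as the ";;" that closes
   an instruction group; the insn_group_barrier insns emitted by the
   functions below are what ultimately produce it.  */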
5113 static void
5114 emit_insn_group_barriers (FILE *dump)
5116 rtx insn;
5117 rtx last_label = 0;
5118 int insns_since_last_label = 0;
5120 init_insn_group_barriers ();
5122 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5124 if (GET_CODE (insn) == CODE_LABEL)
5126 if (insns_since_last_label)
5127 last_label = insn;
5128 insns_since_last_label = 0;
5130 else if (GET_CODE (insn) == NOTE
5131 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5133 if (insns_since_last_label)
5134 last_label = insn;
5135 insns_since_last_label = 0;
5137 else if (GET_CODE (insn) == INSN
5138 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5139 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5141 init_insn_group_barriers ();
5142 last_label = 0;
5144 else if (INSN_P (insn))
5146 insns_since_last_label = 1;
5148 if (group_barrier_needed_p (insn))
5150 if (last_label)
5152 if (dump)
5153 fprintf (dump, "Emitting stop before label %d\n",
5154 INSN_UID (last_label));
5155 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5156 insn = last_label;
5158 init_insn_group_barriers ();
5159 last_label = 0;
5166 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5167 This function has to emit all necessary group barriers. */
5169 static void
5170 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5172 rtx insn;
5174 init_insn_group_barriers ();
5176 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5178 if (GET_CODE (insn) == BARRIER)
5180 rtx last = prev_active_insn (insn);
5182 if (! last)
5183 continue;
5184 if (GET_CODE (last) == JUMP_INSN
5185 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5186 last = prev_active_insn (last);
5187 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5188 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5190 init_insn_group_barriers ();
5192 else if (INSN_P (insn))
5194 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5195 init_insn_group_barriers ();
5196 else if (group_barrier_needed_p (insn))
5198 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5199 init_insn_group_barriers ();
5200 group_barrier_needed_p (insn);
5207 static int errata_find_address_regs (rtx *, void *);
5208 static void errata_emit_nops (rtx);
5209 static void fixup_errata (void);
5211 /* This structure is used to track some details about the previous insn
5212 groups so we can determine if it may be necessary to insert NOPs to
5213 work around hardware errata. */
5214 static struct group
5216 HARD_REG_SET p_reg_set;
5217 HARD_REG_SET gr_reg_conditionally_set;
5218 } last_group[2];
5220 /* Index into the last_group array. */
5221 static int group_idx;
5223 /* Called through for_each_rtx; determines if a hard register that was
5224 conditionally set in the previous group is used as an address register.
5225 It ensures that for_each_rtx returns 1 in that case. */
5226 static int
5227 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5229 rtx x = *xp;
5230 if (GET_CODE (x) != MEM)
5231 return 0;
5232 x = XEXP (x, 0);
5233 if (GET_CODE (x) == POST_MODIFY)
5234 x = XEXP (x, 0);
5235 if (GET_CODE (x) == REG)
5237 struct group *prev_group = last_group + (group_idx ^ 1);
5238 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5239 REGNO (x)))
5240 return 1;
5241 return -1;
5243 return 0;
5246 /* Called for each insn; this function keeps track of the state in
5247 last_group and emits additional NOPs if necessary to work around
5248 an Itanium A/B step erratum. */
5249 static void
5250 errata_emit_nops (rtx insn)
5252 struct group *this_group = last_group + group_idx;
5253 struct group *prev_group = last_group + (group_idx ^ 1);
5254 rtx pat = PATTERN (insn);
5255 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5256 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5257 enum attr_type type;
5258 rtx set = real_pat;
5260 if (GET_CODE (real_pat) == USE
5261 || GET_CODE (real_pat) == CLOBBER
5262 || GET_CODE (real_pat) == ASM_INPUT
5263 || GET_CODE (real_pat) == ADDR_VEC
5264 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5265 || asm_noperands (PATTERN (insn)) >= 0)
5266 return;
5268 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5269 parts of it. */
5271 if (GET_CODE (set) == PARALLEL)
5273 int i;
5274 set = XVECEXP (real_pat, 0, 0);
5275 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5276 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5277 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5279 set = 0;
5280 break;
5284 if (set && GET_CODE (set) != SET)
5285 set = 0;
5287 type = get_attr_type (insn);
5289 if (type == TYPE_F
5290 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5291 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5293 if ((type == TYPE_M || type == TYPE_A) && cond && set
5294 && REG_P (SET_DEST (set))
5295 && GET_CODE (SET_SRC (set)) != PLUS
5296 && GET_CODE (SET_SRC (set)) != MINUS
5297 && (GET_CODE (SET_SRC (set)) != ASHIFT
5298 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5299 && (GET_CODE (SET_SRC (set)) != MEM
5300 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5301 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5303 if (!COMPARISON_P (cond)
5304 || !REG_P (XEXP (cond, 0)))
5305 abort ();
5307 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5308 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5310 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5312 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5313 emit_insn_before (gen_nop (), insn);
5314 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5315 group_idx = 0;
5316 memset (last_group, 0, sizeof last_group);
5320 /* Emit extra nops if they are required to work around hardware errata. */
5322 static void
5323 fixup_errata (void)
5325 rtx insn;
5327 if (! TARGET_B_STEP)
5328 return;
5330 group_idx = 0;
5331 memset (last_group, 0, sizeof last_group);
5333 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5335 if (!INSN_P (insn))
5336 continue;
5338 if (ia64_safe_type (insn) == TYPE_S)
5340 group_idx ^= 1;
5341 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5343 else
5344 errata_emit_nops (insn);
5349 /* Instruction scheduling support. */
5351 #define NR_BUNDLES 10
5353 /* A list of names of all available bundles. */
5355 static const char *bundle_name [NR_BUNDLES] =
5357 ".mii",
5358 ".mmi",
5359 ".mfi",
5360 ".mmf",
5361 #if NR_BUNDLES == 10
5362 ".bbb",
5363 ".mbb",
5364 #endif
5365 ".mib",
5366 ".mmb",
5367 ".mfb",
5368 ".mlx"
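/* The names above spell out the slot types of each bundle: m is a
   memory slot, i integer, f floating point, b branch, and the lx pair
   of .mlx holds a long-immediate operation.  The .bbb and .mbb entries
   exist only when NR_BUNDLES is 10, as it is here.  */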
5371 /* Nonzero if we should insert stop bits into the schedule. */
5373 int ia64_final_schedule = 0;
5375 /* Codes of the corresponding queried units: */
5377 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5378 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5380 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5381 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5383 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5385 /* The following variable value is an insn group barrier. */
5387 static rtx dfa_stop_insn;
5389 /* The following variable value is the last issued insn. */
5391 static rtx last_scheduled_insn;
5393 /* The following variable value is the size of the DFA state. */
5395 static size_t dfa_state_size;
5397 /* The following variable value is a pointer to a DFA state used as
5398 a temporary variable. */
5400 static state_t temp_dfa_state = NULL;
5402 /* The following variable value is the DFA state after issuing the last
5403 insn. */
5405 static state_t prev_cycle_state = NULL;
5407 /* The following array element values are TRUE if the corresponding
5408 insn requires stop bits to be added before it. */
5410 static char *stops_p;
5412 /* The following variable is used to set up the array mentioned above. */
5414 static int stop_before_p = 0;
5416 /* The following variable value is the length of the arrays `clocks' and
5417 `add_cycles'. */
5419 static int clocks_length;
5421 /* The following array element values are the cycles on which the
5422 corresponding insn will be issued. The array is used only for
5423 Itanium1. */
5425 static int *clocks;
5427 /* The following array element values are the numbers of cycles that should be
5428 added to improve insn scheduling for MM_insns for Itanium1. */
5430 static int *add_cycles;
5432 static rtx ia64_single_set (rtx);
5433 static void ia64_emit_insn_before (rtx, rtx);
5435 /* Map a bundle number to its pseudo-op. */
5437 const char *
5438 get_bundle_name (int b)
5440 return bundle_name[b];
5444 /* Return the maximum number of instructions a cpu can issue. */
5446 static int
5447 ia64_issue_rate (void)
5449 return 6;
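/* The issue rate of 6 above reflects the two three-instruction bundles
   an Itanium core can dispatch per clock; it is a property of the
   processors handled here rather than something derived from the DFA.  */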
5452 /* Helper function - like single_set, but look inside COND_EXEC. */
5454 static rtx
5455 ia64_single_set (rtx insn)
5457 rtx x = PATTERN (insn), ret;
5458 if (GET_CODE (x) == COND_EXEC)
5459 x = COND_EXEC_CODE (x);
5460 if (GET_CODE (x) == SET)
5461 return x;
5463 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
5464 Although they are not classical single sets, the second set is there just
5465 to keep the insn from moving past FP-relative stack accesses. */
5466 switch (recog_memoized (insn))
5468 case CODE_FOR_prologue_allocate_stack:
5469 case CODE_FOR_epilogue_deallocate_stack:
5470 ret = XVECEXP (x, 0, 0);
5471 break;
5473 default:
5474 ret = single_set_2 (insn, x);
5475 break;
5478 return ret;
5481 /* Adjust the cost of a scheduling dependency. Return the new cost of
5482 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5484 static int
5485 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5487 enum attr_itanium_class dep_class;
5488 enum attr_itanium_class insn_class;
5490 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5491 return cost;
5493 insn_class = ia64_safe_itanium_class (insn);
5494 dep_class = ia64_safe_itanium_class (dep_insn);
5495 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5496 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5497 return 0;
5499 return cost;
5502 /* Like emit_insn_before, but skip cycle_display notes.
5503 ??? When cycle display notes are implemented, update this. */
5505 static void
5506 ia64_emit_insn_before (rtx insn, rtx before)
5508 emit_insn_before (insn, before);
5511 /* The following function marks insns that produce addresses for load
5512 and store insns. Such insns will be placed into M slots because this
5513 decreases latency for Itanium1 (see function
5514 `ia64_produce_address_p' and the DFA descriptions). */
5516 static void
5517 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5519 rtx insn, link, next, next_tail;
5521 next_tail = NEXT_INSN (tail);
5522 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5523 if (INSN_P (insn))
5524 insn->call = 0;
5525 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5526 if (INSN_P (insn)
5527 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5529 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5531 next = XEXP (link, 0);
5532 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5533 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5534 && ia64_st_address_bypass_p (insn, next))
5535 break;
5536 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5537 || ia64_safe_itanium_class (next)
5538 == ITANIUM_CLASS_FLD)
5539 && ia64_ld_address_bypass_p (insn, next))
5540 break;
5542 insn->call = link != 0;
5546 /* We're beginning a new block. Initialize data structures as necessary. */
5548 static void
5549 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5550 int sched_verbose ATTRIBUTE_UNUSED,
5551 int max_ready ATTRIBUTE_UNUSED)
5553 #ifdef ENABLE_CHECKING
5554 rtx insn;
5556 if (reload_completed)
5557 for (insn = NEXT_INSN (current_sched_info->prev_head);
5558 insn != current_sched_info->next_tail;
5559 insn = NEXT_INSN (insn))
5560 if (SCHED_GROUP_P (insn))
5561 abort ();
5562 #endif
5563 last_scheduled_insn = NULL_RTX;
5564 init_insn_group_barriers ();
5567 /* We are about to begin issuing insns for this clock cycle.
5568 Override the default sort algorithm to better slot instructions. */
5570 static int
5571 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5572 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5573 int reorder_type)
5575 int n_asms;
5576 int n_ready = *pn_ready;
5577 rtx *e_ready = ready + n_ready;
5578 rtx *insnp;
5580 if (sched_verbose)
5581 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5583 if (reorder_type == 0)
5585 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5586 n_asms = 0;
5587 for (insnp = ready; insnp < e_ready; insnp++)
5588 if (insnp < e_ready)
5590 rtx insn = *insnp;
5591 enum attr_type t = ia64_safe_type (insn);
5592 if (t == TYPE_UNKNOWN)
5594 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5595 || asm_noperands (PATTERN (insn)) >= 0)
5597 rtx lowest = ready[n_asms];
5598 ready[n_asms] = insn;
5599 *insnp = lowest;
5600 n_asms++;
5602 else
5604 rtx highest = ready[n_ready - 1];
5605 ready[n_ready - 1] = insn;
5606 *insnp = highest;
5607 return 1;
5612 if (n_asms < n_ready)
5614 /* Some normal insns to process. Skip the asms. */
5615 ready += n_asms;
5616 n_ready -= n_asms;
5618 else if (n_ready > 0)
5619 return 1;
5622 if (ia64_final_schedule)
5624 int deleted = 0;
5625 int nr_need_stop = 0;
5627 for (insnp = ready; insnp < e_ready; insnp++)
5628 if (safe_group_barrier_needed_p (*insnp))
5629 nr_need_stop++;
5631 if (reorder_type == 1 && n_ready == nr_need_stop)
5632 return 0;
5633 if (reorder_type == 0)
5634 return 1;
5635 insnp = e_ready;
5636 /* Move down everything that needs a stop bit, preserving
5637 relative order. */
5638 while (insnp-- > ready + deleted)
5639 while (insnp >= ready + deleted)
5641 rtx insn = *insnp;
5642 if (! safe_group_barrier_needed_p (insn))
5643 break;
5644 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5645 *ready = insn;
5646 deleted++;
5648 n_ready -= deleted;
5649 ready += deleted;
5652 return 1;
5655 /* We are about to begin issuing insns for this clock cycle. Override
5656 the default sort algorithm to better slot instructions. */
5658 static int
5659 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
5660 int clock_var)
5662 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
5663 pn_ready, clock_var, 0);
5666 /* Like ia64_sched_reorder, but called after issuing each insn.
5667 Override the default sort algorithm to better slot instructions. */
5669 static int
5670 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
5671 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
5672 int *pn_ready, int clock_var)
5674 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
5675 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
5676 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
5677 clock_var, 1);
5680 /* We are about to issue INSN. Return the number of insns left on the
5681 ready queue that can be issued this cycle. */
5683 static int
5684 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
5685 int sched_verbose ATTRIBUTE_UNUSED,
5686 rtx insn ATTRIBUTE_UNUSED,
5687 int can_issue_more ATTRIBUTE_UNUSED)
5689 last_scheduled_insn = insn;
5690 memcpy (prev_cycle_state, curr_state, dfa_state_size);
5691 if (reload_completed)
5693 if (group_barrier_needed_p (insn))
5694 abort ();
5695 if (GET_CODE (insn) == CALL_INSN)
5696 init_insn_group_barriers ();
5697 stops_p [INSN_UID (insn)] = stop_before_p;
5698 stop_before_p = 0;
5700 return 1;
5703 /* We are choosing insn from the ready queue. Return nonzero if INSN
5704 can be chosen. */
5706 static int
5707 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
5709 if (insn == NULL_RTX || !INSN_P (insn))
5710 abort ();
5711 return (!reload_completed
5712 || !safe_group_barrier_needed_p (insn));
5715 /* The following variable holds a pseudo-insn used by the DFA insn
5716 scheduler to change the DFA state when the simulated clock is
5717 advanced. */
5719 static rtx dfa_pre_cycle_insn;
5721 /* We are about to begin issuing INSN. Return nonzero if we cannot
5722 issue it on the given cycle CLOCK; *SORT_P is cleared if we should not sort
5723 the ready queue on the next clock start. */
5725 static int
5726 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
5727 int clock, int *sort_p)
5729 int setup_clocks_p = FALSE;
5731 if (insn == NULL_RTX || !INSN_P (insn))
5732 abort ();
5733 if ((reload_completed && safe_group_barrier_needed_p (insn))
5734 || (last_scheduled_insn
5735 && (GET_CODE (last_scheduled_insn) == CALL_INSN
5736 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5737 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
5739 init_insn_group_barriers ();
5740 if (verbose && dump)
5741 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
5742 last_clock == clock ? " + cycle advance" : "");
5743 stop_before_p = 1;
5744 if (last_clock == clock)
5746 state_transition (curr_state, dfa_stop_insn);
5747 if (TARGET_EARLY_STOP_BITS)
5748 *sort_p = (last_scheduled_insn == NULL_RTX
5749 || GET_CODE (last_scheduled_insn) != CALL_INSN);
5750 else
5751 *sort_p = 0;
5752 return 1;
5754 else if (reload_completed)
5755 setup_clocks_p = TRUE;
5756 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5757 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
5758 state_reset (curr_state);
5759 else
5761 memcpy (curr_state, prev_cycle_state, dfa_state_size);
5762 state_transition (curr_state, dfa_stop_insn);
5763 state_transition (curr_state, dfa_pre_cycle_insn);
5764 state_transition (curr_state, NULL);
5767 else if (reload_completed)
5768 setup_clocks_p = TRUE;
5769 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
5770 && GET_CODE (PATTERN (insn)) != ASM_INPUT
5771 && asm_noperands (PATTERN (insn)) < 0)
5773 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
5775 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
5777 rtx link;
5778 int d = -1;
5780 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
5781 if (REG_NOTE_KIND (link) == 0)
5783 enum attr_itanium_class dep_class;
5784 rtx dep_insn = XEXP (link, 0);
5786 dep_class = ia64_safe_itanium_class (dep_insn);
5787 if ((dep_class == ITANIUM_CLASS_MMMUL
5788 || dep_class == ITANIUM_CLASS_MMSHF)
5789 && last_clock - clocks [INSN_UID (dep_insn)] < 4
5790 && (d < 0
5791 || last_clock - clocks [INSN_UID (dep_insn)] < d))
5792 d = last_clock - clocks [INSN_UID (dep_insn)];
5794 if (d >= 0)
5795 add_cycles [INSN_UID (insn)] = 3 - d;
5798 return 0;
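/* The add_cycles[] values computed above for Itanium1 MM-insn
   dependencies are not acted on here; they are consumed by the extra
   forward pass at the end of bundling () below, which pads the code
   with nop bundles.  */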
5803 /* The following page contains abstract data `bundle states' which are
5804 used for bundling insns (inserting nops and template generation). */
5806 /* The following describes state of insn bundling. */
5808 struct bundle_state
5810 /* Unique bundle state number to identify them in the debugging
5811 output */
5812 int unique_num;
5813 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
5814 /* number of nops before and after the insn */
5815 short before_nops_num, after_nops_num;
5816 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
5817 insn) */
5818 int cost; /* cost of the state in cycles */
5819 int accumulated_insns_num; /* number of all previous insns including
5820 nops; an L insn is counted as 2 insns */
5821 int branch_deviation; /* deviation of previous branches from 3rd slots */
5822 struct bundle_state *next; /* next state with the same insn_num */
5823 struct bundle_state *originator; /* originator (previous insn state) */
5824 /* All bundle states are in the following chain. */
5825 struct bundle_state *allocated_states_chain;
5826 /* The DFA State after issuing the insn and the nops. */
5827 state_t dfa_state;
5830 /* The following maps an insn number to the corresponding bundle state. */
5832 static struct bundle_state **index_to_bundle_states;
5834 /* The unique number of next bundle state. */
5836 static int bundle_states_num;
5838 /* All allocated bundle states are in the following chain. */
5840 static struct bundle_state *allocated_bundle_states_chain;
5842 /* All allocated but not used bundle states are in the following
5843 chain. */
5845 static struct bundle_state *free_bundle_state_chain;
5848 /* The following function returns a free bundle state. */
5850 static struct bundle_state *
5851 get_free_bundle_state (void)
5853 struct bundle_state *result;
5855 if (free_bundle_state_chain != NULL)
5857 result = free_bundle_state_chain;
5858 free_bundle_state_chain = result->next;
5860 else
5862 result = xmalloc (sizeof (struct bundle_state));
5863 result->dfa_state = xmalloc (dfa_state_size);
5864 result->allocated_states_chain = allocated_bundle_states_chain;
5865 allocated_bundle_states_chain = result;
5867 result->unique_num = bundle_states_num++;
5868 return result;
5872 /* The following function frees given bundle state. */
5874 static void
5875 free_bundle_state (struct bundle_state *state)
5877 state->next = free_bundle_state_chain;
5878 free_bundle_state_chain = state;
5881 /* Start work with abstract data `bundle states'. */
5883 static void
5884 initiate_bundle_states (void)
5886 bundle_states_num = 0;
5887 free_bundle_state_chain = NULL;
5888 allocated_bundle_states_chain = NULL;
5891 /* Finish work with abstract data `bundle states'. */
5893 static void
5894 finish_bundle_states (void)
5896 struct bundle_state *curr_state, *next_state;
5898 for (curr_state = allocated_bundle_states_chain;
5899 curr_state != NULL;
5900 curr_state = next_state)
5902 next_state = curr_state->allocated_states_chain;
5903 free (curr_state->dfa_state);
5904 free (curr_state);
5908 /* Hash table of the bundle states. The key is dfa_state and insn_num
5909 of the bundle states. */
5911 static htab_t bundle_state_table;
5913 /* The function returns hash of BUNDLE_STATE. */
5915 static unsigned
5916 bundle_state_hash (const void *bundle_state)
5918 const struct bundle_state *state = (struct bundle_state *) bundle_state;
5919 unsigned result, i;
5921 for (result = i = 0; i < dfa_state_size; i++)
5922 result += (((unsigned char *) state->dfa_state) [i]
5923 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
5924 return result + state->insn_num;
5927 /* The function returns nonzero if the bundle state keys are equal. */
5929 static int
5930 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
5932 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
5933 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
5935 return (state1->insn_num == state2->insn_num
5936 && memcmp (state1->dfa_state, state2->dfa_state,
5937 dfa_state_size) == 0);
5940 /* The function inserts the BUNDLE_STATE into the hash table. The
5941 function returns nonzero if the bundle state has been inserted into the
5942 table. The table contains the best bundle state for a given key. */
5944 static int
5945 insert_bundle_state (struct bundle_state *bundle_state)
5947 void **entry_ptr;
5949 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
5950 if (*entry_ptr == NULL)
5952 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
5953 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
5954 *entry_ptr = (void *) bundle_state;
5955 return TRUE;
5957 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
5958 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
5959 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
5960 > bundle_state->accumulated_insns_num
5961 || (((struct bundle_state *)
5962 *entry_ptr)->accumulated_insns_num
5963 == bundle_state->accumulated_insns_num
5964 && ((struct bundle_state *)
5965 *entry_ptr)->branch_deviation
5966 > bundle_state->branch_deviation))))
5969 struct bundle_state temp;
5971 temp = *(struct bundle_state *) *entry_ptr;
5972 *(struct bundle_state *) *entry_ptr = *bundle_state;
5973 ((struct bundle_state *) *entry_ptr)->next = temp.next;
5974 *bundle_state = temp;
5976 return FALSE;
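/* In other words, candidate states with the same (dfa_state, insn_num)
   key are compared lexicographically by (cost, accumulated_insns_num,
   branch_deviation), and only the best one is kept in the table.  */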
5979 /* Start work with the hash table. */
5981 static void
5982 initiate_bundle_state_table (void)
5984 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
5985 (htab_del) 0);
5988 /* Finish work with the hash table. */
5990 static void
5991 finish_bundle_state_table (void)
5993 htab_delete (bundle_state_table);
5998 /* The following variable is an insn `nop' used to check bundle states
5999 with different numbers of inserted nops. */
6001 static rtx ia64_nop;
6003 /* The following function tries to issue NOPS_NUM nops for the current
6004 state without advancing the processor cycle. If it fails, the
6005 function returns FALSE and frees the current state. */
6007 static int
6008 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6010 int i;
6012 for (i = 0; i < nops_num; i++)
6013 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6015 free_bundle_state (curr_state);
6016 return FALSE;
6018 return TRUE;
6021 /* The following function tries to issue INSN for the current
6022 state without advancing the processor cycle. If it fails, the
6023 function returns FALSE and frees the current state. */
6025 static int
6026 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6028 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6030 free_bundle_state (curr_state);
6031 return FALSE;
6033 return TRUE;
6036 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6037 starting with ORIGINATOR without advancing the processor cycle. If
6038 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6039 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6040 If successful, the function creates a new bundle state and
6041 inserts it into the hash table and into `index_to_bundle_states'. */
6043 static void
6044 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6045 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6047 struct bundle_state *curr_state;
6049 curr_state = get_free_bundle_state ();
6050 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6051 curr_state->insn = insn;
6052 curr_state->insn_num = originator->insn_num + 1;
6053 curr_state->cost = originator->cost;
6054 curr_state->originator = originator;
6055 curr_state->before_nops_num = before_nops_num;
6056 curr_state->after_nops_num = 0;
6057 curr_state->accumulated_insns_num
6058 = originator->accumulated_insns_num + before_nops_num;
6059 curr_state->branch_deviation = originator->branch_deviation;
6060 if (insn == NULL_RTX)
6061 abort ();
6062 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6064 if (GET_MODE (insn) == TImode)
6065 abort ();
6066 if (!try_issue_nops (curr_state, before_nops_num))
6067 return;
6068 if (!try_issue_insn (curr_state, insn))
6069 return;
6070 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6071 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6072 && curr_state->accumulated_insns_num % 3 != 0)
6074 free_bundle_state (curr_state);
6075 return;
6078 else if (GET_MODE (insn) != TImode)
6080 if (!try_issue_nops (curr_state, before_nops_num))
6081 return;
6082 if (!try_issue_insn (curr_state, insn))
6083 return;
6084 curr_state->accumulated_insns_num++;
6085 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6086 || asm_noperands (PATTERN (insn)) >= 0)
6087 abort ();
6088 if (ia64_safe_type (insn) == TYPE_L)
6089 curr_state->accumulated_insns_num++;
6091 else
6093 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6094 state_transition (curr_state->dfa_state, NULL);
6095 curr_state->cost++;
6096 if (!try_issue_nops (curr_state, before_nops_num))
6097 return;
6098 if (!try_issue_insn (curr_state, insn))
6099 return;
6100 curr_state->accumulated_insns_num++;
6101 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6102 || asm_noperands (PATTERN (insn)) >= 0)
6104 /* Finish bundle containing asm insn. */
6105 curr_state->after_nops_num
6106 = 3 - curr_state->accumulated_insns_num % 3;
6107 curr_state->accumulated_insns_num
6108 += 3 - curr_state->accumulated_insns_num % 3;
6110 else if (ia64_safe_type (insn) == TYPE_L)
6111 curr_state->accumulated_insns_num++;
6113 if (ia64_safe_type (insn) == TYPE_B)
6114 curr_state->branch_deviation
6115 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6116 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6118 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6120 state_t dfa_state;
6121 struct bundle_state *curr_state1;
6122 struct bundle_state *allocated_states_chain;
6124 curr_state1 = get_free_bundle_state ();
6125 dfa_state = curr_state1->dfa_state;
6126 allocated_states_chain = curr_state1->allocated_states_chain;
6127 *curr_state1 = *curr_state;
6128 curr_state1->dfa_state = dfa_state;
6129 curr_state1->allocated_states_chain = allocated_states_chain;
6130 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6131 dfa_state_size);
6132 curr_state = curr_state1;
6134 if (!try_issue_nops (curr_state,
6135 3 - curr_state->accumulated_insns_num % 3))
6136 return;
6137 curr_state->after_nops_num
6138 = 3 - curr_state->accumulated_insns_num % 3;
6139 curr_state->accumulated_insns_num
6140 += 3 - curr_state->accumulated_insns_num % 3;
6142 if (!insert_bundle_state (curr_state))
6143 free_bundle_state (curr_state);
6144 return;
6147 /* The following function returns the position in the two-bundle window
6148 for the given STATE. */
6150 static int
6151 get_max_pos (state_t state)
6153 if (cpu_unit_reservation_p (state, pos_6))
6154 return 6;
6155 else if (cpu_unit_reservation_p (state, pos_5))
6156 return 5;
6157 else if (cpu_unit_reservation_p (state, pos_4))
6158 return 4;
6159 else if (cpu_unit_reservation_p (state, pos_3))
6160 return 3;
6161 else if (cpu_unit_reservation_p (state, pos_2))
6162 return 2;
6163 else if (cpu_unit_reservation_p (state, pos_1))
6164 return 1;
6165 else
6166 return 0;
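/* Judging by the callers, a result of 6 means the whole two-bundle
   window is filled, while 3 means only the first bundle of the window
   is.  */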
6169 /* The function returns the code of a possible template for the given
6170 position and state. The function should be called only with a
6171 position value of 3 or 6. */
6173 static int
6174 get_template (state_t state, int pos)
6176 switch (pos)
6178 case 3:
6179 if (cpu_unit_reservation_p (state, _0mii_))
6180 return 0;
6181 else if (cpu_unit_reservation_p (state, _0mmi_))
6182 return 1;
6183 else if (cpu_unit_reservation_p (state, _0mfi_))
6184 return 2;
6185 else if (cpu_unit_reservation_p (state, _0mmf_))
6186 return 3;
6187 else if (cpu_unit_reservation_p (state, _0bbb_))
6188 return 4;
6189 else if (cpu_unit_reservation_p (state, _0mbb_))
6190 return 5;
6191 else if (cpu_unit_reservation_p (state, _0mib_))
6192 return 6;
6193 else if (cpu_unit_reservation_p (state, _0mmb_))
6194 return 7;
6195 else if (cpu_unit_reservation_p (state, _0mfb_))
6196 return 8;
6197 else if (cpu_unit_reservation_p (state, _0mlx_))
6198 return 9;
6199 else
6200 abort ();
6201 case 6:
6202 if (cpu_unit_reservation_p (state, _1mii_))
6203 return 0;
6204 else if (cpu_unit_reservation_p (state, _1mmi_))
6205 return 1;
6206 else if (cpu_unit_reservation_p (state, _1mfi_))
6207 return 2;
6208 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6209 return 3;
6210 else if (cpu_unit_reservation_p (state, _1bbb_))
6211 return 4;
6212 else if (cpu_unit_reservation_p (state, _1mbb_))
6213 return 5;
6214 else if (cpu_unit_reservation_p (state, _1mib_))
6215 return 6;
6216 else if (cpu_unit_reservation_p (state, _1mmb_))
6217 return 7;
6218 else if (cpu_unit_reservation_p (state, _1mfb_))
6219 return 8;
6220 else if (cpu_unit_reservation_p (state, _1mlx_))
6221 return 9;
6222 else
6223 abort ();
6224 default:
6225 abort ();
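/* Judging by the cpu unit names and by the uses in bundling () below,
   the template codes returned above are:
     0 = MII   1 = MMI   2 = MFI   3 = MMF   4 = BBB
     5 = MBB   6 = MIB   7 = MMB   8 = MFB   9 = MLX  */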
6229 /* The following function returns the next insn important for insn
6230 bundling, starting at INSN and before TAIL. */
6232 static rtx
6233 get_next_important_insn (rtx insn, rtx tail)
6235 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6236 if (INSN_P (insn)
6237 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6238 && GET_CODE (PATTERN (insn)) != USE
6239 && GET_CODE (PATTERN (insn)) != CLOBBER)
6240 return insn;
6241 return NULL_RTX;
6244 /* The following function does insn bundling. Bundling means
6245 inserting templates and nop insns to fit insn groups into permitted
6246 templates. Instruction scheduling uses NDFA (non-deterministic
6247 finite automata) encoding information about the templates and the
6248 inserted nops. Nondeterminism of the automaton permits following
6249 all possible insn sequences very quickly.
6251 Unfortunately it is not possible to get information about inserting
6252 nop insns and used templates from the automaton states. The
6253 automaton only says that we can issue an insn, possibly inserting
6254 some nops before it and using some template. Therefore insn
6255 bundling in this function is implemented by using a DFA
6256 (deterministic finite automaton). We follow all possible insn
6257 sequences by inserting 0-2 nops (that is what the NDFA describes for
6258 insn scheduling) before/after each insn being bundled. We know the
6259 start of simulated processor cycle from insn scheduling (insn
6260 starting a new cycle has TImode).
6262 A simple implementation of insn bundling would create an enormous
6263 number of possible insn sequences satisfying information about new
6264 cycle ticks taken from the insn scheduling. To make the algorithm
6265 practical we use dynamic programming. Each decision (about
6266 inserting nops and implicitly about previous decisions) is described
6267 by structure bundle_state (see above). If we generate the same
6268 bundle state (the key is the automaton state after issuing the insns and
6269 nops for it), we reuse the already generated one. As a consequence we
6270 reject some decisions which cannot improve the solution and
6271 reduce memory for the algorithm.
6273 When we reach the end of EBB (extended basic block), we choose the
6274 best sequence and then, moving back in EBB, insert templates for
6275 the best alternative. The templates are taken from querying
6276 automaton state for each insn in chosen bundle states.
6278 So the algorithm makes two (forward and backward) passes through
6279 EBB. There is an additional forward pass through the EBB for the
6280 Itanium1 processor. This pass inserts more nops to make the dependency
6281 between a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
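/* As a rough illustration (the register numbers and insns here are made
   up), a cycle containing two M-type loads followed by a stop might end
   up being emitted as a single MMI bundle with one inserted nop:

	{ .mmi
	  ld8 r14 = [r32]
	  ld8 r15 = [r33]
	  nop.i 0 ;;
	}

   where ".mmi" comes from a bundle_selector insn chosen in the backward
   pass and the trailing stop bit comes from an insn_group_barrier.  */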
6283 static void
6284 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6286 struct bundle_state *curr_state, *next_state, *best_state;
6287 rtx insn, next_insn;
6288 int insn_num;
6289 int i, bundle_end_p, only_bundle_end_p, asm_p;
6290 int pos = 0, max_pos, template0, template1;
6291 rtx b;
6292 rtx nop;
6293 enum attr_type type;
6295 insn_num = 0;
6296 /* Count insns in the EBB. */
6297 for (insn = NEXT_INSN (prev_head_insn);
6298 insn && insn != tail;
6299 insn = NEXT_INSN (insn))
6300 if (INSN_P (insn))
6301 insn_num++;
6302 if (insn_num == 0)
6303 return;
6304 bundling_p = 1;
6305 dfa_clean_insn_cache ();
6306 initiate_bundle_state_table ();
6307 index_to_bundle_states = xmalloc ((insn_num + 2)
6308 * sizeof (struct bundle_state *));
6309 /* First (forward) pass -- generation of bundle states. */
6310 curr_state = get_free_bundle_state ();
6311 curr_state->insn = NULL;
6312 curr_state->before_nops_num = 0;
6313 curr_state->after_nops_num = 0;
6314 curr_state->insn_num = 0;
6315 curr_state->cost = 0;
6316 curr_state->accumulated_insns_num = 0;
6317 curr_state->branch_deviation = 0;
6318 curr_state->next = NULL;
6319 curr_state->originator = NULL;
6320 state_reset (curr_state->dfa_state);
6321 index_to_bundle_states [0] = curr_state;
6322 insn_num = 0;
6323 /* Shift cycle mark if it is put on insn which could be ignored. */
6324 for (insn = NEXT_INSN (prev_head_insn);
6325 insn != tail;
6326 insn = NEXT_INSN (insn))
6327 if (INSN_P (insn)
6328 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6329 || GET_CODE (PATTERN (insn)) == USE
6330 || GET_CODE (PATTERN (insn)) == CLOBBER)
6331 && GET_MODE (insn) == TImode)
6333 PUT_MODE (insn, VOIDmode);
6334 for (next_insn = NEXT_INSN (insn);
6335 next_insn != tail;
6336 next_insn = NEXT_INSN (next_insn))
6337 if (INSN_P (next_insn)
6338 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6339 && GET_CODE (PATTERN (next_insn)) != USE
6340 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6342 PUT_MODE (next_insn, TImode);
6343 break;
6346 /* Forward pass: generation of bundle states. */
6347 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6348 insn != NULL_RTX;
6349 insn = next_insn)
6351 if (!INSN_P (insn)
6352 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6353 || GET_CODE (PATTERN (insn)) == USE
6354 || GET_CODE (PATTERN (insn)) == CLOBBER)
6355 abort ();
6356 type = ia64_safe_type (insn);
6357 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6358 insn_num++;
6359 index_to_bundle_states [insn_num] = NULL;
6360 for (curr_state = index_to_bundle_states [insn_num - 1];
6361 curr_state != NULL;
6362 curr_state = next_state)
6364 pos = curr_state->accumulated_insns_num % 3;
6365 next_state = curr_state->next;
6366 /* We must fill up the current bundle in order to start a
6367 subsequent asm insn in a new bundle. Asm insn is always
6368 placed in a separate bundle. */
6369 only_bundle_end_p
6370 = (next_insn != NULL_RTX
6371 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6372 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6373 /* We may fill up the current bundle if it is the cycle end
6374 without a group barrier. */
6375 bundle_end_p
6376 = (only_bundle_end_p || next_insn == NULL_RTX
6377 || (GET_MODE (next_insn) == TImode
6378 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6379 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6380 || type == TYPE_S
6381 /* We need to insert 2 nops for cases like M_MII. To
6382 guarantee issuing all insns on the same cycle for
6383 Itanium 1, we need to issue 2 nops after the first M
6384 insn (MnnMII where n is a nop insn). */
6385 || ((type == TYPE_M || type == TYPE_A)
6386 && ia64_tune == PROCESSOR_ITANIUM
6387 && !bundle_end_p && pos == 1))
6388 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6389 only_bundle_end_p);
6390 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6391 only_bundle_end_p);
6392 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6393 only_bundle_end_p);
6395 if (index_to_bundle_states [insn_num] == NULL)
6396 abort ();
6397 for (curr_state = index_to_bundle_states [insn_num];
6398 curr_state != NULL;
6399 curr_state = curr_state->next)
6400 if (verbose >= 2 && dump)
6402 /* This structure is taken from generated code of the
6403 pipeline hazard recognizer (see file insn-attrtab.c).
6404 Please don't forget to change the structure if a new
6405 automaton is added to .md file. */
6406 struct DFA_chip
6408 unsigned short one_automaton_state;
6409 unsigned short oneb_automaton_state;
6410 unsigned short two_automaton_state;
6411 unsigned short twob_automaton_state;
6414 fprintf
6415 (dump,
6416 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6417 curr_state->unique_num,
6418 (curr_state->originator == NULL
6419 ? -1 : curr_state->originator->unique_num),
6420 curr_state->cost,
6421 curr_state->before_nops_num, curr_state->after_nops_num,
6422 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6423 (ia64_tune == PROCESSOR_ITANIUM
6424 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6425 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6426 INSN_UID (insn));
6429 if (index_to_bundle_states [insn_num] == NULL)
6430 /* We should find a solution because the 2nd insn scheduling has
6431 found one. */
6432 abort ();
6433 /* Find a state corresponding to the best insn sequence. */
6434 best_state = NULL;
6435 for (curr_state = index_to_bundle_states [insn_num];
6436 curr_state != NULL;
6437 curr_state = curr_state->next)
6438 /* We are only looking at states whose last bundle is completely
6439 filled. First we prefer insn sequences with minimal cost, then
6440 with the fewest inserted nops, and finally with branch insns
6441 placed in 3rd slots. */
6442 if (curr_state->accumulated_insns_num % 3 == 0
6443 && (best_state == NULL || best_state->cost > curr_state->cost
6444 || (best_state->cost == curr_state->cost
6445 && (curr_state->accumulated_insns_num
6446 < best_state->accumulated_insns_num
6447 || (curr_state->accumulated_insns_num
6448 == best_state->accumulated_insns_num
6449 && curr_state->branch_deviation
6450 < best_state->branch_deviation)))))
6451 best_state = curr_state;
6452 /* Second (backward) pass: adding nops and templates. */
6453 insn_num = best_state->before_nops_num;
6454 template0 = template1 = -1;
6455 for (curr_state = best_state;
6456 curr_state->originator != NULL;
6457 curr_state = curr_state->originator)
6459 insn = curr_state->insn;
6460 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6461 || asm_noperands (PATTERN (insn)) >= 0);
6462 insn_num++;
6463 if (verbose >= 2 && dump)
6465 struct DFA_chip
6467 unsigned short one_automaton_state;
6468 unsigned short oneb_automaton_state;
6469 unsigned short two_automaton_state;
6470 unsigned short twob_automaton_state;
6473 fprintf
6474 (dump,
6475 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6476 curr_state->unique_num,
6477 (curr_state->originator == NULL
6478 ? -1 : curr_state->originator->unique_num),
6479 curr_state->cost,
6480 curr_state->before_nops_num, curr_state->after_nops_num,
6481 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6482 (ia64_tune == PROCESSOR_ITANIUM
6483 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6484 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6485 INSN_UID (insn));
6487 /* Find the position in the current bundle window. The window can
6488 contain at most two bundles. Two bundle window means that
6489 the processor will make two bundle rotations. */
6490 max_pos = get_max_pos (curr_state->dfa_state);
6491 if (max_pos == 6
6492 /* The following (negative template number) means that the
6493 processor did one bundle rotation. */
6494 || (max_pos == 3 && template0 < 0))
6496 /* We are at the end of the window -- find template(s) for
6497 its bundle(s). */
6498 pos = max_pos;
6499 if (max_pos == 3)
6500 template0 = get_template (curr_state->dfa_state, 3);
6501 else
6503 template1 = get_template (curr_state->dfa_state, 3);
6504 template0 = get_template (curr_state->dfa_state, 6);
6507 if (max_pos > 3 && template1 < 0)
6508 /* It may happen when we have the stop inside a bundle. */
6510 if (pos > 3)
6511 abort ();
6512 template1 = get_template (curr_state->dfa_state, 3);
6513 pos += 3;
6515 if (!asm_p)
6516 /* Emit nops after the current insn. */
6517 for (i = 0; i < curr_state->after_nops_num; i++)
6519 nop = gen_nop ();
6520 emit_insn_after (nop, insn);
6521 pos--;
6522 if (pos < 0)
6523 abort ();
6524 if (pos % 3 == 0)
6526 /* We are at the start of a bundle: emit the template
6527 (it should be defined). */
6528 if (template0 < 0)
6529 abort ();
6530 b = gen_bundle_selector (GEN_INT (template0));
6531 ia64_emit_insn_before (b, nop);
6532 /* If we have two bundle window, we make one bundle
6533 rotation. Otherwise template0 will be undefined
6534 (negative value). */
6535 template0 = template1;
6536 template1 = -1;
6539 /* Move the position backward in the window. A group barrier has
6540 no slot. An asm insn takes a whole bundle. */
6541 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6542 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6543 && asm_noperands (PATTERN (insn)) < 0)
6544 pos--;
6545 /* Long insn takes 2 slots. */
6546 if (ia64_safe_type (insn) == TYPE_L)
6547 pos--;
6548 if (pos < 0)
6549 abort ();
6550 if (pos % 3 == 0
6551 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6552 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6553 && asm_noperands (PATTERN (insn)) < 0)
6555 /* The current insn is at the bundle start: emit the
6556 template. */
6557 if (template0 < 0)
6558 abort ();
6559 b = gen_bundle_selector (GEN_INT (template0));
6560 ia64_emit_insn_before (b, insn);
6561 b = PREV_INSN (insn);
6562 insn = b;
6563 /* See comment above in analogous place for emitting nops
6564 after the insn. */
6565 template0 = template1;
6566 template1 = -1;
6568 /* Emit nops before the current insn. */
6569 for (i = 0; i < curr_state->before_nops_num; i++)
6571 nop = gen_nop ();
6572 ia64_emit_insn_before (nop, insn);
6573 nop = PREV_INSN (insn);
6574 insn = nop;
6575 pos--;
6576 if (pos < 0)
6577 abort ();
6578 if (pos % 3 == 0)
6580 /* See comment above in analogous place for emitting nops
6581 after the insn. */
6582 if (template0 < 0)
6583 abort ();
6584 b = gen_bundle_selector (GEN_INT (template0));
6585 ia64_emit_insn_before (b, insn);
6586 b = PREV_INSN (insn);
6587 insn = b;
6588 template0 = template1;
6589 template1 = -1;
6593 if (ia64_tune == PROCESSOR_ITANIUM)
6594 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
6595 Itanium1 has a strange design: if the distance between an insn
6596 and a dependent MM-insn is less than 4 cycles then we get an
6597 additional 6-cycle stall. So we make the distance equal to 4
6598 cycles if it is less. */
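  /* Roughly, each extra stall cycle inserted by the loop below is
     realized as a bundle of nops ended by a stop bit, e.g. (illustrative
     assembler output):

	{ .mii
	  nop.m 0
	  nop.i 0
	  nop.i 0 ;;
	}

     or, when several cycles are needed, as the denser "MI;I;" form with
     an additional stop after the first nop.i.  */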
6599 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6600 insn != NULL_RTX;
6601 insn = next_insn)
6603 if (!INSN_P (insn)
6604 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6605 || GET_CODE (PATTERN (insn)) == USE
6606 || GET_CODE (PATTERN (insn)) == CLOBBER)
6607 abort ();
6608 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6609 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
6610 /* We found a MM-insn which needs additional cycles. */
6612 rtx last;
6613 int i, j, n;
6614 int pred_stop_p;
6616 /* Now we are searching for a template of the bundle in
6617 which the MM-insn is placed and the position of the
6618 insn in the bundle (0, 1, 2). We also check whether
6619 there is a stop before the insn. */
6620 last = prev_active_insn (insn);
6621 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6622 if (pred_stop_p)
6623 last = prev_active_insn (last);
6624 n = 0;
6625 for (;; last = prev_active_insn (last))
6626 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6628 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
6629 if (template0 == 9)
6630 /* The insn is in an MLX bundle. Change the template
6631 to MFI because we will add nops before the
6632 insn. It simplifies subsequent code a lot. */
6633 PATTERN (last)
6634 = gen_bundle_selector (const2_rtx); /* -> MFI */
6635 break;
6637 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
6638 && (ia64_safe_itanium_class (last)
6639 != ITANIUM_CLASS_IGNORE))
6640 n++;
6641 /* Some sanity checks: the stop is not at the
6642 bundle start, there are no more than 3 insns in the bundle,
6643 and the MM-insn is not at the start of a bundle with
6644 template MLX. */
6645 if ((pred_stop_p && n == 0) || n > 2
6646 || (template0 == 9 && n != 0))
6647 abort ();
6648 /* Put nops after the insn in the bundle. */
6649 for (j = 3 - n; j > 0; j --)
6650 ia64_emit_insn_before (gen_nop (), insn);
6651 /* This takes into account that we will add N more nops
6652 before the insn later -- please see the code below. */
6653 add_cycles [INSN_UID (insn)]--;
6654 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6655 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6656 insn);
6657 if (pred_stop_p)
6658 add_cycles [INSN_UID (insn)]--;
6659 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
6661 /* Insert "MII;" template. */
6662 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
6663 insn);
6664 ia64_emit_insn_before (gen_nop (), insn);
6665 ia64_emit_insn_before (gen_nop (), insn);
6666 if (i > 1)
6668 /* To decrease code size, we use "MI;I;"
6669 template. */
6670 ia64_emit_insn_before
6671 (gen_insn_group_barrier (GEN_INT (3)), insn);
6672 i--;
6674 ia64_emit_insn_before (gen_nop (), insn);
6675 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6676 insn);
6678 /* Put the MM-insn in the same slot of a bundle with the
6679 same template as the original one. */
6680 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
6681 insn);
6682 /* To put the insn in the same slot, add necessary number
6683 of nops. */
6684 for (j = n; j > 0; j --)
6685 ia64_emit_insn_before (gen_nop (), insn);
6686 /* Put the stop if the original bundle had it. */
6687 if (pred_stop_p)
6688 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6689 insn);
6692 free (index_to_bundle_states);
6693 finish_bundle_state_table ();
6694 bundling_p = 0;
6695 dfa_clean_insn_cache ();
6698 /* The following function is called at the end of scheduling BB or
6699 EBB. After reload, it inserts stop bits and does insn bundling. */
6701 static void
6702 ia64_sched_finish (FILE *dump, int sched_verbose)
6704 if (sched_verbose)
6705 fprintf (dump, "// Finishing schedule.\n");
6706 if (!reload_completed)
6707 return;
6708 if (reload_completed)
6710 final_emit_insn_group_barriers (dump);
6711 bundling (dump, sched_verbose, current_sched_info->prev_head,
6712 current_sched_info->next_tail);
6713 if (sched_verbose && dump)
6714 fprintf (dump, "// finishing %d-%d\n",
6715 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
6716 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
6718 return;
6722 /* The following function inserts stop bits in scheduled BB or EBB. */
6724 static void
6725 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6727 rtx insn;
6728 int need_barrier_p = 0;
6729 rtx prev_insn = NULL_RTX;
6731 init_insn_group_barriers ();
6733 for (insn = NEXT_INSN (current_sched_info->prev_head);
6734 insn != current_sched_info->next_tail;
6735 insn = NEXT_INSN (insn))
6737 if (GET_CODE (insn) == BARRIER)
6739 rtx last = prev_active_insn (insn);
6741 if (! last)
6742 continue;
6743 if (GET_CODE (last) == JUMP_INSN
6744 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6745 last = prev_active_insn (last);
6746 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6747 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6749 init_insn_group_barriers ();
6750 need_barrier_p = 0;
6751 prev_insn = NULL_RTX;
6753 else if (INSN_P (insn))
6755 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6757 init_insn_group_barriers ();
6758 need_barrier_p = 0;
6759 prev_insn = NULL_RTX;
6761 else if (need_barrier_p || group_barrier_needed_p (insn))
6763 if (TARGET_EARLY_STOP_BITS)
6765 rtx last;
6767 for (last = insn;
6768 last != current_sched_info->prev_head;
6769 last = PREV_INSN (last))
6770 if (INSN_P (last) && GET_MODE (last) == TImode
6771 && stops_p [INSN_UID (last)])
6772 break;
6773 if (last == current_sched_info->prev_head)
6774 last = insn;
6775 last = prev_active_insn (last);
6776 if (last
6777 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
6778 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
6779 last);
6780 init_insn_group_barriers ();
6781 for (last = NEXT_INSN (last);
6782 last != insn;
6783 last = NEXT_INSN (last))
6784 if (INSN_P (last))
6785 group_barrier_needed_p (last);
6787 else
6789 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6790 insn);
6791 init_insn_group_barriers ();
6793 group_barrier_needed_p (insn);
6794 prev_insn = NULL_RTX;
6796 else if (recog_memoized (insn) >= 0)
6797 prev_insn = insn;
6798 need_barrier_p = (GET_CODE (insn) == CALL_INSN
6799 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6800 || asm_noperands (PATTERN (insn)) >= 0);
6807 /* If the following function returns TRUE, we will use the DFA
6808 insn scheduler. */
6810 static int
6811 ia64_first_cycle_multipass_dfa_lookahead (void)
6813 return (reload_completed ? 6 : 4);
6816 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
6818 static void
6819 ia64_init_dfa_pre_cycle_insn (void)
6821 if (temp_dfa_state == NULL)
6823 dfa_state_size = state_size ();
6824 temp_dfa_state = xmalloc (dfa_state_size);
6825 prev_cycle_state = xmalloc (dfa_state_size);
6827 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
6828 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
6829 recog_memoized (dfa_pre_cycle_insn);
6830 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
6831 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
6832 recog_memoized (dfa_stop_insn);
6835 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
6836 used by the DFA insn scheduler. */
6838 static rtx
6839 ia64_dfa_pre_cycle_insn (void)
6841 return dfa_pre_cycle_insn;
6844 /* The following function returns TRUE if PRODUCER (of type ilog or
6845 ld) produces address for CONSUMER (of type st or stf). */
6848 ia64_st_address_bypass_p (rtx producer, rtx consumer)
6850 rtx dest, reg, mem;
6852 if (producer == NULL_RTX || consumer == NULL_RTX)
6853 abort ();
6854 dest = ia64_single_set (producer);
6855 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6856 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6857 abort ();
6858 if (GET_CODE (reg) == SUBREG)
6859 reg = SUBREG_REG (reg);
6860 dest = ia64_single_set (consumer);
6861 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
6862 || GET_CODE (mem) != MEM)
6863 abort ();
6864 return reg_mentioned_p (reg, mem);
6867 /* The following function returns TRUE if PRODUCER (of type ilog or
6868 ld) produces address for CONSUMER (of type ld or fld). */
6871 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
6873 rtx dest, src, reg, mem;
6875 if (producer == NULL_RTX || consumer == NULL_RTX)
6876 abort ();
6877 dest = ia64_single_set (producer);
6878 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6879 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6880 abort ();
6881 if (GET_CODE (reg) == SUBREG)
6882 reg = SUBREG_REG (reg);
6883 src = ia64_single_set (consumer);
6884 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
6885 abort ();
6886 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
6887 mem = XVECEXP (mem, 0, 0);
6888 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
6889 mem = XEXP (mem, 0);
6891 /* Note that LO_SUM is used for GOT loads. */
6892 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
6893 abort ();
6895 return reg_mentioned_p (reg, mem);
6898 /* The following function returns TRUE if INSN produces an address for a
6899 load/store insn. We will place such insns into an M slot because that
6900 decreases their latency. */
6903 ia64_produce_address_p (rtx insn)
6905 return insn->call;
6909 /* Emit pseudo-ops for the assembler to describe predicate relations.
6910 At present this assumes that we only consider predicate pairs to
6911 be mutex, and that the assembler can deduce proper values from
6912 straight-line code. */
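/* The pred_rel_mutex and safe_across_calls_* insns emitted below are
   expanded by the machine description into the assembler's predicate
   relation annotations (the .pred.rel family); the exact directive text
   lives in ia64.md rather than here.  */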
6914 static void
6915 emit_predicate_relation_info (void)
6917 basic_block bb;
6919 FOR_EACH_BB_REVERSE (bb)
6921 int r;
6922 rtx head = BB_HEAD (bb);
6924 /* We only need such notes at code labels. */
6925 if (GET_CODE (head) != CODE_LABEL)
6926 continue;
6927 if (GET_CODE (NEXT_INSN (head)) == NOTE
6928 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
6929 head = NEXT_INSN (head);
6931 for (r = PR_REG (0); r < PR_REG (64); r += 2)
6932 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
6934 rtx p = gen_rtx_REG (BImode, r);
6935 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
6936 if (head == BB_END (bb))
6937 BB_END (bb) = n;
6938 head = n;
6942 /* Look for conditional calls that do not return, and protect predicate
6943 relations around them. Otherwise the assembler will assume the call
6944 returns, and complain about uses of call-clobbered predicates after
6945 the call. */
6946 FOR_EACH_BB_REVERSE (bb)
6948 rtx insn = BB_HEAD (bb);
6950 while (1)
6952 if (GET_CODE (insn) == CALL_INSN
6953 && GET_CODE (PATTERN (insn)) == COND_EXEC
6954 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
6956 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
6957 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
6958 if (BB_HEAD (bb) == insn)
6959 BB_HEAD (bb) = b;
6960 if (BB_END (bb) == insn)
6961 BB_END (bb) = a;
6964 if (insn == BB_END (bb))
6965 break;
6966 insn = NEXT_INSN (insn);
6971 /* Perform machine dependent operations on the rtl chain INSNS. */
6973 static void
6974 ia64_reorg (void)
6976 /* We are freeing block_for_insn in the toplev to keep compatibility
6977 with old MDEP_REORGS that are not CFG based. Recompute it now. */
6978 compute_bb_for_insn ();
6980 /* If optimizing, we'll have split before scheduling. */
6981 if (optimize == 0)
6982 split_all_insns (0);
6984 /* ??? update_life_info_in_dirty_blocks fails to terminate during
6985 non-optimizing bootstrap. */
6986 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
6988 if (ia64_flag_schedule_insns2)
6990 timevar_push (TV_SCHED2);
6991 ia64_final_schedule = 1;
6993 initiate_bundle_states ();
6994 ia64_nop = make_insn_raw (gen_nop ());
6995 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
6996 recog_memoized (ia64_nop);
6997 clocks_length = get_max_uid () + 1;
6998 stops_p = xcalloc (1, clocks_length);
6999 if (ia64_tune == PROCESSOR_ITANIUM)
7001 clocks = xcalloc (clocks_length, sizeof (int));
7002 add_cycles = xcalloc (clocks_length, sizeof (int));
7004 if (ia64_tune == PROCESSOR_ITANIUM2)
7006 pos_1 = get_cpu_unit_code ("2_1");
7007 pos_2 = get_cpu_unit_code ("2_2");
7008 pos_3 = get_cpu_unit_code ("2_3");
7009 pos_4 = get_cpu_unit_code ("2_4");
7010 pos_5 = get_cpu_unit_code ("2_5");
7011 pos_6 = get_cpu_unit_code ("2_6");
7012 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7013 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7014 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7015 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7016 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7017 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7018 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7019 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7020 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7021 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7022 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7023 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7024 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7025 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7026 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7027 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7028 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7029 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7030 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7031 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7033 else
7035 pos_1 = get_cpu_unit_code ("1_1");
7036 pos_2 = get_cpu_unit_code ("1_2");
7037 pos_3 = get_cpu_unit_code ("1_3");
7038 pos_4 = get_cpu_unit_code ("1_4");
7039 pos_5 = get_cpu_unit_code ("1_5");
7040 pos_6 = get_cpu_unit_code ("1_6");
7041 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7042 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7043 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7044 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7045 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7046 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7047 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7048 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7049 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7050 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7051 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7052 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7053 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7054 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7055 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7056 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7057 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7058 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7059 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7060 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7062 schedule_ebbs (dump_file);
7063 finish_bundle_states ();
7064 if (ia64_tune == PROCESSOR_ITANIUM)
7066 free (add_cycles);
7067 free (clocks);
7069 free (stops_p);
7070 emit_insn_group_barriers (dump_file);
7072 ia64_final_schedule = 0;
7073 timevar_pop (TV_SCHED2);
7075 else
7076 emit_all_insn_group_barriers (dump_file);
7078 /* A call must not be the last instruction in a function, so that the
7079 return address is still within the function, so that unwinding works
7080 properly. Note that IA-64 differs from dwarf2 on this point. */
7081 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7083 rtx insn;
7084 int saw_stop = 0;
7086 insn = get_last_insn ();
7087 if (! INSN_P (insn))
7088 insn = prev_active_insn (insn);
7089 /* Skip over insns that expand to nothing. */
7090 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7092 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7093 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7094 saw_stop = 1;
7095 insn = prev_active_insn (insn);
7097 if (GET_CODE (insn) == CALL_INSN)
7099 if (! saw_stop)
7100 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7101 emit_insn (gen_break_f ());
7102 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7106 fixup_errata ();
7107 emit_predicate_relation_info ();
7109 if (ia64_flag_var_tracking)
7111 timevar_push (TV_VAR_TRACKING);
7112 variable_tracking_main ();
7113 timevar_pop (TV_VAR_TRACKING);
7117 /* Return true if REGNO is used by the epilogue. */
7120 ia64_epilogue_uses (int regno)
7122 switch (regno)
7124 case R_GR (1):
7125 /* With a call to a function in another module, we will write a new
7126 value to "gp". After returning from such a call, we need to make
7127 sure the function restores the original gp-value, even if the
7128 function itself does not use the gp anymore. */
7129 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7131 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7132 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7133 /* For functions defined with the syscall_linkage attribute, all
7134 input registers are marked as live at all function exits. This
7135 prevents the register allocator from using the input registers,
7136 which in turn makes it possible to restart a system call after
7137 an interrupt without having to save/restore the input registers.
7138 This also prevents kernel data from leaking to application code. */
7139 return lookup_attribute ("syscall_linkage",
7140 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7142 case R_BR (0):
7143 /* Conditional return patterns can't represent the use of `b0' as
7144 the return address, so we force the value live this way. */
7145 return 1;
7147 case AR_PFS_REGNUM:
7148 /* Likewise for ar.pfs, which is used by br.ret. */
7149 return 1;
7151 default:
7152 return 0;
7156 /* Return true if REGNO is used by the frame unwinder. */
7159 ia64_eh_uses (int regno)
7161 if (! reload_completed)
7162 return 0;
7164 if (current_frame_info.reg_save_b0
7165 && regno == current_frame_info.reg_save_b0)
7166 return 1;
7167 if (current_frame_info.reg_save_pr
7168 && regno == current_frame_info.reg_save_pr)
7169 return 1;
7170 if (current_frame_info.reg_save_ar_pfs
7171 && regno == current_frame_info.reg_save_ar_pfs)
7172 return 1;
7173 if (current_frame_info.reg_save_ar_unat
7174 && regno == current_frame_info.reg_save_ar_unat)
7175 return 1;
7176 if (current_frame_info.reg_save_ar_lc
7177 && regno == current_frame_info.reg_save_ar_lc)
7178 return 1;
7180 return 0;
7183 /* Return true if this goes in small data/bss. */
7185 /* ??? We could also support our own long data here. Generating movl/add/ld8
7186 instead of addl,ld8/ld8 makes the code bigger, but should make the
7187 code faster because there is one less load. This also includes incomplete
7188 types which can't go in sdata/sbss. */
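/* Concretely (a sketch only; the register numbers are arbitrary), a
   small data access is gp-relative:

	addl r14 = @gprel(var), gp
	;;
	ld8 r15 = [r14]

   while other data is currently reached through the linkage table:

	addl r14 = @ltoff(var), gp
	;;
	ld8 r14 = [r14]
	;;
	ld8 r15 = [r14]

   The movl/add/ld8 alternative mentioned above would trade the first of
   those two loads for a long-immediate move of the symbol's address.  */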
7190 static bool
7191 ia64_in_small_data_p (tree exp)
7193 if (TARGET_NO_SDATA)
7194 return false;
7196 /* We want to merge strings, so we never consider them small data. */
7197 if (TREE_CODE (exp) == STRING_CST)
7198 return false;
7200 /* Functions are never small data. */
7201 if (TREE_CODE (exp) == FUNCTION_DECL)
7202 return false;
7204 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7206 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7207 if (strcmp (section, ".sdata") == 0
7208 || strcmp (section, ".sbss") == 0)
7209 return true;
7211 else
7213 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7215 /* If this is an incomplete type with size 0, then we can't put it
7216 in sdata because it might be too big when completed. */
7217 if (size > 0 && size <= ia64_section_threshold)
7218 return true;
7221 return false;
7224 /* Output assembly directives for prologue regions. */
7226 /* The current basic block number. */
7228 static bool last_block;
7230 /* True if we need a copy_state command at the start of the next block. */
7232 static bool need_copy_state;
7234 /* The function emits unwind directives for the start of an epilogue. */
7236 static void
7237 process_epilogue (void)
7239 /* If this isn't the last block of the function, then we need to label the
7240 current state, and copy it back in at the start of the next block. */
7242 if (!last_block)
7244 fprintf (asm_out_file, "\t.label_state 1\n");
7245 need_copy_state = true;
7248 fprintf (asm_out_file, "\t.restore sp\n");
7251 /* This function processes a SET pattern looking for specific patterns
7252 which result in emitting an assembly directive required for unwinding. */
7254 static int
7255 process_set (FILE *asm_out_file, rtx pat)
7257 rtx src = SET_SRC (pat);
7258 rtx dest = SET_DEST (pat);
7259 int src_regno, dest_regno;
7261 /* Look for the ALLOC insn. */
7262 if (GET_CODE (src) == UNSPEC_VOLATILE
7263 && XINT (src, 1) == UNSPECV_ALLOC
7264 && GET_CODE (dest) == REG)
7266 dest_regno = REGNO (dest);
7268 /* If this isn't the final destination for ar.pfs, the alloc
7269 shouldn't have been marked frame related. */
7270 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7271 abort ();
7273 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7274 ia64_dbx_register_number (dest_regno));
7275 return 1;
7278 /* Look for SP = .... */
7279 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7281 if (GET_CODE (src) == PLUS)
7283 rtx op0 = XEXP (src, 0);
7284 rtx op1 = XEXP (src, 1);
7285 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7287 if (INTVAL (op1) < 0)
7288 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7289 -INTVAL (op1));
7290 else
7291 process_epilogue ();
7293 else
7294 abort ();
7296 else if (GET_CODE (src) == REG
7297 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7298 process_epilogue ();
7299 else
7300 abort ();
7302 return 1;
7305 /* Register move we need to look at. */
7306 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7308 src_regno = REGNO (src);
7309 dest_regno = REGNO (dest);
7311 switch (src_regno)
7313 case BR_REG (0):
7314 /* Saving return address pointer. */
7315 if (dest_regno != current_frame_info.reg_save_b0)
7316 abort ();
7317 fprintf (asm_out_file, "\t.save rp, r%d\n",
7318 ia64_dbx_register_number (dest_regno));
7319 return 1;
7321 case PR_REG (0):
7322 if (dest_regno != current_frame_info.reg_save_pr)
7323 abort ();
7324 fprintf (asm_out_file, "\t.save pr, r%d\n",
7325 ia64_dbx_register_number (dest_regno));
7326 return 1;
7328 case AR_UNAT_REGNUM:
7329 if (dest_regno != current_frame_info.reg_save_ar_unat)
7330 abort ();
7331 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7332 ia64_dbx_register_number (dest_regno));
7333 return 1;
7335 case AR_LC_REGNUM:
7336 if (dest_regno != current_frame_info.reg_save_ar_lc)
7337 abort ();
7338 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7339 ia64_dbx_register_number (dest_regno));
7340 return 1;
7342 case STACK_POINTER_REGNUM:
7343 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7344 || ! frame_pointer_needed)
7345 abort ();
7346 fprintf (asm_out_file, "\t.vframe r%d\n",
7347 ia64_dbx_register_number (dest_regno));
7348 return 1;
7350 default:
7351 /* Everything else should indicate being stored to memory. */
7352 abort ();
7356 /* Memory store we need to look at. */
7357 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7359 long off;
7360 rtx base;
7361 const char *saveop;
7363 if (GET_CODE (XEXP (dest, 0)) == REG)
7365 base = XEXP (dest, 0);
7366 off = 0;
7368 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7369 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7371 base = XEXP (XEXP (dest, 0), 0);
7372 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7374 else
7375 abort ();
7377 if (base == hard_frame_pointer_rtx)
7379 saveop = ".savepsp";
7380 off = - off;
7382 else if (base == stack_pointer_rtx)
7383 saveop = ".savesp";
7384 else
7385 abort ();
7387 src_regno = REGNO (src);
7388 switch (src_regno)
7390 case BR_REG (0):
7391 if (current_frame_info.reg_save_b0 != 0)
7392 abort ();
7393 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7394 return 1;
7396 case PR_REG (0):
7397 if (current_frame_info.reg_save_pr != 0)
7398 abort ();
7399 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7400 return 1;
7402 case AR_LC_REGNUM:
7403 if (current_frame_info.reg_save_ar_lc != 0)
7404 abort ();
7405 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7406 return 1;
7408 case AR_PFS_REGNUM:
7409 if (current_frame_info.reg_save_ar_pfs != 0)
7410 abort ();
7411 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7412 return 1;
7414 case AR_UNAT_REGNUM:
7415 if (current_frame_info.reg_save_ar_unat != 0)
7416 abort ();
7417 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7418 return 1;
7420 case GR_REG (4):
7421 case GR_REG (5):
7422 case GR_REG (6):
7423 case GR_REG (7):
7424 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7425 1 << (src_regno - GR_REG (4)));
7426 return 1;
7428 case BR_REG (1):
7429 case BR_REG (2):
7430 case BR_REG (3):
7431 case BR_REG (4):
7432 case BR_REG (5):
7433 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7434 1 << (src_regno - BR_REG (1)));
7435 return 1;
7437 case FR_REG (2):
7438 case FR_REG (3):
7439 case FR_REG (4):
7440 case FR_REG (5):
7441 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7442 1 << (src_regno - FR_REG (2)));
7443 return 1;
7445 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7446 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7447 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7448 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7449 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7450 1 << (src_regno - FR_REG (12)));
7451 return 1;
7453 default:
7454 return 0;
7458 return 0;
7462 /* This function looks at a single insn and emits any directives
7463 required to unwind this insn. */
7464 void
7465 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7467 if (flag_unwind_tables
7468 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7470 rtx pat;
7472 if (GET_CODE (insn) == NOTE
7473 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7475 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7477 /* Restore unwind state from immediately before the epilogue. */
7478 if (need_copy_state)
7480 fprintf (asm_out_file, "\t.body\n");
7481 fprintf (asm_out_file, "\t.copy_state 1\n");
7482 need_copy_state = false;
7486 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7487 return;
7489 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7490 if (pat)
7491 pat = XEXP (pat, 0);
7492 else
7493 pat = PATTERN (insn);
7495 switch (GET_CODE (pat))
7497 case SET:
7498 process_set (asm_out_file, pat);
7499 break;
7501 case PARALLEL:
7503 int par_index;
7504 int limit = XVECLEN (pat, 0);
7505 for (par_index = 0; par_index < limit; par_index++)
7507 rtx x = XVECEXP (pat, 0, par_index);
7508 if (GET_CODE (x) == SET)
7509 process_set (asm_out_file, x);
7511 break;
7514 default:
7515 abort ();
7521 void
7522 ia64_init_builtins (void)
7524 tree psi_type_node = build_pointer_type (integer_type_node);
7525 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7527 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7528 tree si_ftype_psi_si_si
7529 = build_function_type_list (integer_type_node,
7530 psi_type_node, integer_type_node,
7531 integer_type_node, NULL_TREE);
7533 /* __sync_val_compare_and_swap_di */
7534 tree di_ftype_pdi_di_di
7535 = build_function_type_list (long_integer_type_node,
7536 pdi_type_node, long_integer_type_node,
7537 long_integer_type_node, NULL_TREE);
7538 /* __sync_bool_compare_and_swap_di */
7539 tree si_ftype_pdi_di_di
7540 = build_function_type_list (integer_type_node,
7541 pdi_type_node, long_integer_type_node,
7542 long_integer_type_node, NULL_TREE);
7543 /* __sync_synchronize */
7544 tree void_ftype_void
7545 = build_function_type (void_type_node, void_list_node);
7547 /* __sync_lock_test_and_set_si */
7548 tree si_ftype_psi_si
7549 = build_function_type_list (integer_type_node,
7550 psi_type_node, integer_type_node, NULL_TREE);
7552 /* __sync_lock_test_and_set_di */
7553 tree di_ftype_pdi_di
7554 = build_function_type_list (long_integer_type_node,
7555 pdi_type_node, long_integer_type_node,
7556 NULL_TREE);
7558 /* __sync_lock_release_si */
7559 tree void_ftype_psi
7560 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7562 /* __sync_lock_release_di */
7563 tree void_ftype_pdi
7564 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
7566 tree fpreg_type;
7567 tree float80_type;
7569 /* The __fpreg type. */
7570 fpreg_type = make_node (REAL_TYPE);
7571 /* ??? The back end should know to load/save __fpreg variables using
7572 the ldf.fill and stf.spill instructions. */
7573 TYPE_PRECISION (fpreg_type) = 80;
7574 layout_type (fpreg_type);
7575 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
7577 /* The __float80 type. */
7578 float80_type = make_node (REAL_TYPE);
7579 TYPE_PRECISION (float80_type) = 80;
7580 layout_type (float80_type);
7581 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
7583 /* The __float128 type. */
7584 if (!TARGET_HPUX)
7586 tree float128_type = make_node (REAL_TYPE);
7587 TYPE_PRECISION (float128_type) = 128;
7588 layout_type (float128_type);
7589 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
7591 else
7592 /* Under HPUX, this is a synonym for "long double". */
7593 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
7594 "__float128");
7596 #define def_builtin(name, type, code) \
7597 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
7598 NULL, NULL_TREE)
7600 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7601 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7602 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7603 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7604 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7605 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7606 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7607 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7609 def_builtin ("__sync_synchronize", void_ftype_void,
7610 IA64_BUILTIN_SYNCHRONIZE);
7612 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7613 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7614 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7615 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7616 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7617 IA64_BUILTIN_LOCK_RELEASE_SI);
7618 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7619 IA64_BUILTIN_LOCK_RELEASE_DI);
7621 def_builtin ("__builtin_ia64_bsp",
7622 build_function_type (ptr_type_node, void_list_node),
7623 IA64_BUILTIN_BSP);
7625 def_builtin ("__builtin_ia64_flushrs",
7626 build_function_type (void_type_node, void_list_node),
7627 IA64_BUILTIN_FLUSHRS);
7629 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7630 IA64_BUILTIN_FETCH_AND_ADD_SI);
7631 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7632 IA64_BUILTIN_FETCH_AND_SUB_SI);
7633 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7634 IA64_BUILTIN_FETCH_AND_OR_SI);
7635 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7636 IA64_BUILTIN_FETCH_AND_AND_SI);
7637 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7638 IA64_BUILTIN_FETCH_AND_XOR_SI);
7639 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
7640 IA64_BUILTIN_FETCH_AND_NAND_SI);
7642 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
7643 IA64_BUILTIN_ADD_AND_FETCH_SI);
7644 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
7645 IA64_BUILTIN_SUB_AND_FETCH_SI);
7646 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
7647 IA64_BUILTIN_OR_AND_FETCH_SI);
7648 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
7649 IA64_BUILTIN_AND_AND_FETCH_SI);
7650 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
7651 IA64_BUILTIN_XOR_AND_FETCH_SI);
7652 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
7653 IA64_BUILTIN_NAND_AND_FETCH_SI);
7655 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
7656 IA64_BUILTIN_FETCH_AND_ADD_DI);
7657 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
7658 IA64_BUILTIN_FETCH_AND_SUB_DI);
7659 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
7660 IA64_BUILTIN_FETCH_AND_OR_DI);
7661 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
7662 IA64_BUILTIN_FETCH_AND_AND_DI);
7663 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
7664 IA64_BUILTIN_FETCH_AND_XOR_DI);
7665 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
7666 IA64_BUILTIN_FETCH_AND_NAND_DI);
7668 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
7669 IA64_BUILTIN_ADD_AND_FETCH_DI);
7670 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
7671 IA64_BUILTIN_SUB_AND_FETCH_DI);
7672 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
7673 IA64_BUILTIN_OR_AND_FETCH_DI);
7674 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
7675 IA64_BUILTIN_AND_AND_FETCH_DI);
7676 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
7677 IA64_BUILTIN_XOR_AND_FETCH_DI);
7678 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
7679 IA64_BUILTIN_NAND_AND_FETCH_DI);
7681 #undef def_builtin
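/* Illustrative only, not part of the original source: user code can call
   the builtins registered above directly, e.g. with a hypothetical shared
   variable

	static int word;
	int seen = __sync_val_compare_and_swap_si (&word, 0, 1);
	__sync_synchronize ();

   The _si variants operate on int through int *, and the _di variants on
   long through long *, matching the function types built above.  */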
7684 /* Expand fetch_and_op intrinsics. The basic code sequence is:
7687 tmp = [ptr];
7688 do {
7689 ret = tmp;
7690 ar.ccv = tmp;
7691 tmp <op>= value;
7692 cmpxchgsz.acq tmp = [ptr], tmp
7693 } while (tmp != ret)
7696 static rtx
7697 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
7698 tree arglist, rtx target)
7700 rtx ret, label, tmp, ccv, insn, mem, value;
7701 tree arg0, arg1;
7703 arg0 = TREE_VALUE (arglist);
7704 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7705 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7706 #ifdef POINTERS_EXTEND_UNSIGNED
7707   if (GET_MODE (mem) != Pmode)
7708 mem = convert_memory_address (Pmode, mem);
7709 #endif
7710 value = expand_expr (arg1, NULL_RTX, mode, 0);
7712 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7713 MEM_VOLATILE_P (mem) = 1;
7715 if (target && register_operand (target, mode))
7716 ret = target;
7717 else
7718 ret = gen_reg_rtx (mode);
7720 emit_insn (gen_mf ());
7722 /* Special case for fetchadd instructions. */
7723 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
7725 if (mode == SImode)
7726 insn = gen_fetchadd_acq_si (ret, mem, value);
7727 else
7728 insn = gen_fetchadd_acq_di (ret, mem, value);
7729 emit_insn (insn);
7730 return ret;
7733 tmp = gen_reg_rtx (mode);
7734 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7735 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7736 emit_move_insn (tmp, mem);
7738 label = gen_label_rtx ();
7739 emit_label (label);
7740 emit_move_insn (ret, tmp);
7741 convert_move (ccv, tmp, /*unsignedp=*/1);
7743   /* Perform the specific operation.  NAND arrives here as
7744      one_cmpl_optab: complement TMP, then AND it with VALUE.  */
7745 if (binoptab == one_cmpl_optab)
7747 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7748 binoptab = and_optab;
7750 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
7752 if (mode == SImode)
7753 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
7754 else
7755 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
7756 emit_insn (insn);
7758 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
7760 return ret;
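/* Illustrative only: the value returned by the expansion above is the
   contents of the memory word before the operation, so, assuming a shared
   counter,

	int old = __sync_fetch_and_add_si (&counter, 1);

   takes the fetchadd fast path when the addend is an operand fetchadd
   accepts, and otherwise falls back to the ar.ccv/cmpxchg loop shown in
   the comment before the function.  */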
7763 /* Expand op_and_fetch intrinsics. The basic code sequence is:
7766 tmp = [ptr];
7767 do {
7768 old = tmp;
7769 ar.ccv = tmp;
7770 ret = tmp <op> value;
7771 cmpxchgsz.acq tmp = [ptr], ret
7772 } while (tmp != old)
7775 static rtx
7776 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
7777 tree arglist, rtx target)
7779 rtx old, label, tmp, ret, ccv, insn, mem, value;
7780 tree arg0, arg1;
7782 arg0 = TREE_VALUE (arglist);
7783 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7784 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7785 #ifdef POINTERS_EXTEND_UNSIGNED
7786   if (GET_MODE (mem) != Pmode)
7787 mem = convert_memory_address (Pmode, mem);
7788 #endif
7790 value = expand_expr (arg1, NULL_RTX, mode, 0);
7792 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7793 MEM_VOLATILE_P (mem) = 1;
7795 if (target && ! register_operand (target, mode))
7796 target = NULL_RTX;
7798 emit_insn (gen_mf ());
7799 tmp = gen_reg_rtx (mode);
7800 old = gen_reg_rtx (mode);
7801 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7802 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7804 emit_move_insn (tmp, mem);
7806 label = gen_label_rtx ();
7807 emit_label (label);
7808 emit_move_insn (old, tmp);
7809 convert_move (ccv, tmp, /*unsignedp=*/1);
7811   /* Perform the specific operation.  NAND arrives here as
7812      one_cmpl_optab: complement TMP, then AND it with VALUE.  */
7813 if (binoptab == one_cmpl_optab)
7815 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7816 binoptab = and_optab;
7818 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
7820 if (mode == SImode)
7821 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
7822 else
7823 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
7824 emit_insn (insn);
7826 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
7828 return ret;
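/* Illustrative only: in contrast to the fetch_and_op expansion, the value
   returned here is the result after the operation has been applied, e.g.
   with the same hypothetical counter,

	int now = __sync_add_and_fetch_si (&counter, 1);
*/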
7831 /* Expand val_ and bool_compare_and_swap. For val_ we want:
7833 ar.ccv = oldval
7835 cmpxchgsz.acq ret = [ptr], newval, ar.ccv
7836 return ret
7838 For bool_ it's the same except return ret == oldval.
7841 static rtx
7842 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
7843 int boolp, tree arglist, rtx target)
7845 tree arg0, arg1, arg2;
7846 rtx mem, old, new, ccv, tmp, insn;
7848 arg0 = TREE_VALUE (arglist);
7849 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7850 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
7851 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7852 old = expand_expr (arg1, NULL_RTX, mode, 0);
7853 new = expand_expr (arg2, NULL_RTX, mode, 0);
7855 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7856 MEM_VOLATILE_P (mem) = 1;
7858 if (GET_MODE (old) != mode)
7859 old = convert_to_mode (mode, old, /*unsignedp=*/1);
7860 if (GET_MODE (new) != mode)
7861 new = convert_to_mode (mode, new, /*unsignedp=*/1);
7863 if (! register_operand (old, mode))
7864 old = copy_to_mode_reg (mode, old);
7865 if (! register_operand (new, mode))
7866 new = copy_to_mode_reg (mode, new);
7868 if (! boolp && target && register_operand (target, mode))
7869 tmp = target;
7870 else
7871 tmp = gen_reg_rtx (mode);
7873 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7874 convert_move (ccv, old, /*unsignedp=*/1);
7875 emit_insn (gen_mf ());
7876 if (mode == SImode)
7877 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
7878 else
7879 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
7880 emit_insn (insn);
7882 if (boolp)
7884 if (! target)
7885 target = gen_reg_rtx (rmode);
7886 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
7888 else
7889 return tmp;
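/* Illustrative only, with hypothetical variables: the val_ form returns
   whatever was found in memory, while the bool_ form returns the
   comparison emitted above,

	int seen = __sync_val_compare_and_swap_si (&word, oldv, newv);
	int done = __sync_bool_compare_and_swap_si (&word, oldv, newv);

   where DONE is nonzero iff the swap took place.  */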
7892 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
7894 static rtx
7895 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
7896 rtx target)
7898 tree arg0, arg1;
7899 rtx mem, new, ret, insn;
7901 arg0 = TREE_VALUE (arglist);
7902 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7903 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7904 new = expand_expr (arg1, NULL_RTX, mode, 0);
7906 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7907 MEM_VOLATILE_P (mem) = 1;
7908 if (! register_operand (new, mode))
7909 new = copy_to_mode_reg (mode, new);
7911 if (target && register_operand (target, mode))
7912 ret = target;
7913 else
7914 ret = gen_reg_rtx (mode);
7916 if (mode == SImode)
7917 insn = gen_xchgsi (ret, mem, new);
7918 else
7919 insn = gen_xchgdi (ret, mem, new);
7920 emit_insn (insn);
7922 return ret;
7925 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
7927 static rtx
7928 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
7929 rtx target ATTRIBUTE_UNUSED)
7931 tree arg0;
7932 rtx mem;
7934 arg0 = TREE_VALUE (arglist);
7935 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7937 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7938 MEM_VOLATILE_P (mem) = 1;
7940 emit_move_insn (mem, const0_rtx);
7942 return const0_rtx;
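/* Illustrative only: together the two expansions above support a simple
   spin lock, e.g. with a hypothetical lock word,

	while (__sync_lock_test_and_set_si (&lock, 1))
	  ;
	critical_section ();
	__sync_lock_release_si (&lock);

   since the xchg returns the previous contents and the release stores
   zero (`stsz.rel [ptr] = r0', as the comment above puts it).  */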
7945 rtx
7946 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7947 enum machine_mode mode ATTRIBUTE_UNUSED,
7948 int ignore ATTRIBUTE_UNUSED)
7950 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7951 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
7952 tree arglist = TREE_OPERAND (exp, 1);
7953 enum machine_mode rmode = VOIDmode;
7955 switch (fcode)
7957 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
7958 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
7959 mode = SImode;
7960 rmode = SImode;
7961 break;
7963 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
7964 case IA64_BUILTIN_LOCK_RELEASE_SI:
7965 case IA64_BUILTIN_FETCH_AND_ADD_SI:
7966 case IA64_BUILTIN_FETCH_AND_SUB_SI:
7967 case IA64_BUILTIN_FETCH_AND_OR_SI:
7968 case IA64_BUILTIN_FETCH_AND_AND_SI:
7969 case IA64_BUILTIN_FETCH_AND_XOR_SI:
7970 case IA64_BUILTIN_FETCH_AND_NAND_SI:
7971 case IA64_BUILTIN_ADD_AND_FETCH_SI:
7972 case IA64_BUILTIN_SUB_AND_FETCH_SI:
7973 case IA64_BUILTIN_OR_AND_FETCH_SI:
7974 case IA64_BUILTIN_AND_AND_FETCH_SI:
7975 case IA64_BUILTIN_XOR_AND_FETCH_SI:
7976 case IA64_BUILTIN_NAND_AND_FETCH_SI:
7977 mode = SImode;
7978 break;
7980 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
7981 mode = DImode;
7982 rmode = SImode;
7983 break;
7985 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
7986 mode = DImode;
7987 rmode = DImode;
7988 break;
7990 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
7991 case IA64_BUILTIN_LOCK_RELEASE_DI:
7992 case IA64_BUILTIN_FETCH_AND_ADD_DI:
7993 case IA64_BUILTIN_FETCH_AND_SUB_DI:
7994 case IA64_BUILTIN_FETCH_AND_OR_DI:
7995 case IA64_BUILTIN_FETCH_AND_AND_DI:
7996 case IA64_BUILTIN_FETCH_AND_XOR_DI:
7997 case IA64_BUILTIN_FETCH_AND_NAND_DI:
7998 case IA64_BUILTIN_ADD_AND_FETCH_DI:
7999 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8000 case IA64_BUILTIN_OR_AND_FETCH_DI:
8001 case IA64_BUILTIN_AND_AND_FETCH_DI:
8002 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8003 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8004 mode = DImode;
8005 break;
8007 default:
8008 break;
8011 switch (fcode)
8013 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8014 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8015 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8016 target);
8018 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8019 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8020 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8021 target);
8023 case IA64_BUILTIN_SYNCHRONIZE:
8024 emit_insn (gen_mf ());
8025 return const0_rtx;
8027 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8028 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8029 return ia64_expand_lock_test_and_set (mode, arglist, target);
8031 case IA64_BUILTIN_LOCK_RELEASE_SI:
8032 case IA64_BUILTIN_LOCK_RELEASE_DI:
8033 return ia64_expand_lock_release (mode, arglist, target);
8035 case IA64_BUILTIN_BSP:
8036 if (! target || ! register_operand (target, DImode))
8037 target = gen_reg_rtx (DImode);
8038 emit_insn (gen_bsp_value (target));
8039 #ifdef POINTERS_EXTEND_UNSIGNED
8040 target = convert_memory_address (ptr_mode, target);
8041 #endif
8042 return target;
8044 case IA64_BUILTIN_FLUSHRS:
8045 emit_insn (gen_flushrs ());
8046 return const0_rtx;
8048 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8049 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8050 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8052 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8053 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8054 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8056 case IA64_BUILTIN_FETCH_AND_OR_SI:
8057 case IA64_BUILTIN_FETCH_AND_OR_DI:
8058 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8060 case IA64_BUILTIN_FETCH_AND_AND_SI:
8061 case IA64_BUILTIN_FETCH_AND_AND_DI:
8062 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8064 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8065 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8066 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8068 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8069 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8070 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8072 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8073 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8074 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8076 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8077 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8078 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8080 case IA64_BUILTIN_OR_AND_FETCH_SI:
8081 case IA64_BUILTIN_OR_AND_FETCH_DI:
8082 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8084 case IA64_BUILTIN_AND_AND_FETCH_SI:
8085 case IA64_BUILTIN_AND_AND_FETCH_DI:
8086 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8088 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8089 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8090 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8092 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8093 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8094 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
8096 default:
8097 break;
8100 return NULL_RTX;
8103 /* On HP-UX IA64, aggregate parameters are passed in the most
8104    significant bits of the stack slot.  */
8106 enum direction
8107 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8109 /* Exception to normal case for structures/unions/etc. */
8111 if (type && AGGREGATE_TYPE_P (type)
8112 && int_size_in_bytes (type) < UNITS_PER_WORD)
8113 return upward;
8115 /* Fall back to the default. */
8116 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
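/* Illustrative only: for a small aggregate such as

	struct { short s; }

   int_size_in_bytes is below UNITS_PER_WORD, so the hook above answers
   `upward' and the value ends up in the most significant bytes of its
   stack slot; word-sized and larger aggregates fall through to
   DEFAULT_FUNCTION_ARG_PADDING.  */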
8119 /* Linked list of all external functions that are to be emitted by GCC.
8120 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8121 order to avoid putting out names that are never really used. */
8123 struct extern_func_list GTY(())
8125 struct extern_func_list *next;
8126 tree decl;
8129 static GTY(()) struct extern_func_list *extern_func_head;
8131 static void
8132 ia64_hpux_add_extern_decl (tree decl)
8134 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8136 p->decl = decl;
8137 p->next = extern_func_head;
8138 extern_func_head = p;
8141 /* Print out the list of used global functions. */
8143 static void
8144 ia64_hpux_file_end (void)
8146 struct extern_func_list *p;
8148 for (p = extern_func_head; p; p = p->next)
8150 tree decl = p->decl;
8151 tree id = DECL_ASSEMBLER_NAME (decl);
8153 if (!id)
8154 abort ();
8156 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8158 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8160 TREE_ASM_WRITTEN (decl) = 1;
8161 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8162 fputs (TYPE_ASM_OP, asm_out_file);
8163 assemble_name (asm_out_file, name);
8164 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8168 extern_func_head = 0;
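/* Illustrative only: for an external function `foo' that was referenced
   but never defined in this unit, the loop above emits roughly

	.global foo
	.type	foo,@function

   with the exact spelling determined by the globalize_label hook,
   TYPE_ASM_OP and TYPE_OPERAND_FMT.  */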
8171 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
8172    modes of word_mode and larger.  Rename the TFmode libfuncs using the
8173    HPUX conventions.  __divtf3 is used for XFmode; we need to keep it for
8174    backward compatibility.  */
8176 static void
8177 ia64_init_libfuncs (void)
8179 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8180 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8181 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8182 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8184 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8185 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8186 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8187 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8188 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8190 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8191 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8192 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8193 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8194 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8195 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8197 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8198 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8199 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8200 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8202 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8203 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
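/* Illustrative only: once these names are installed, a 32-bit signed
   division that is not open-coded is emitted as a call to __divsi3, and a
   TFmode multiplication as a call to _U_Qfmpy; for example, with
   hypothetical operands,

	int q = i / j;		-> __divsi3 (i, j)
	quad r = a * b;		-> _U_Qfmpy (a, b)

   where `quad' stands for whatever source type maps to TFmode.  */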
8206 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8208 static void
8209 ia64_hpux_init_libfuncs (void)
8211 ia64_init_libfuncs ();
8213 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8214 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8215 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8217 /* ia64_expand_compare uses this. */
8218 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8220 /* These should never be used. */
8221 set_optab_libfunc (eq_optab, TFmode, 0);
8222 set_optab_libfunc (ne_optab, TFmode, 0);
8223 set_optab_libfunc (gt_optab, TFmode, 0);
8224 set_optab_libfunc (ge_optab, TFmode, 0);
8225 set_optab_libfunc (lt_optab, TFmode, 0);
8226 set_optab_libfunc (le_optab, TFmode, 0);
8229 /* Rename the division and modulus functions in VMS. */
8231 static void
8232 ia64_vms_init_libfuncs (void)
8234 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8235 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8236 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8237 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8238 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8239 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8240 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8241 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8244 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8245 the HPUX conventions. */
8247 static void
8248 ia64_sysv4_init_libfuncs (void)
8250 ia64_init_libfuncs ();
8252 /* These functions are not part of the HPUX TFmode interface. We
8253 use them instead of _U_Qfcmp, which doesn't work the way we
8254 expect. */
8255 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8256 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8257 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8258 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8259 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8260 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8262 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8263 glibc doesn't have them. */
8266 /* Switch to the section to which we should output X. The only thing
8267 special we do here is to honor small data. */
8269 static void
8270 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8271 unsigned HOST_WIDE_INT align)
8273 if (GET_MODE_SIZE (mode) > 0
8274 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8275 sdata_section ();
8276 else
8277 default_elf_select_rtx_section (mode, x, align);
8280 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8281 Pretend flag_pic is always set. */
8283 static void
8284 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8286 default_elf_select_section_1 (exp, reloc, align, true);
8289 static void
8290 ia64_rwreloc_unique_section (tree decl, int reloc)
8292 default_unique_section_1 (decl, reloc, true);
8295 static void
8296 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8297 unsigned HOST_WIDE_INT align)
8299 int save_pic = flag_pic;
8300 flag_pic = 1;
8301 ia64_select_rtx_section (mode, x, align);
8302 flag_pic = save_pic;
8305 static unsigned int
8306 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8308 return default_section_type_flags_1 (decl, name, reloc, true);
8311 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
8312    structure type and the address of that structure should be passed
8313    in out0, rather than in r8.  */
8315 static bool
8316 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8318 tree ret_type = TREE_TYPE (fntype);
8320 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8321 as the structure return address parameter, if the return value
8322 type has a non-trivial copy constructor or destructor. It is not
8323 clear if this same convention should be used for other
8324 programming languages. Until G++ 3.4, we incorrectly used r8 for
8325 these return values. */
8326 return (abi_version_at_least (2)
8327 && ret_type
8328 && TYPE_MODE (ret_type) == BLKmode
8329 && TREE_ADDRESSABLE (ret_type)
8330 && strcmp (lang_hooks.name, "GNU C++") == 0);
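/* Illustrative only: a C++ return type such as

	struct S { S (const S &); ~S (); char buf[32]; };

   has a non-trivial copy constructor and destructor, so when S is
   returned in memory the checks above (abi_version_at_least (2), BLKmode,
   TREE_ADDRESSABLE, "GNU C++") hold and the return slot address is
   expected in out0 rather than r8.  */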
8333 /* Output the assembler code for a thunk function. THUNK_DECL is the
8334 declaration for the thunk function itself, FUNCTION is the decl for
8335 the target function. DELTA is an immediate constant offset to be
8336 added to THIS. If VCALL_OFFSET is nonzero, the word at
8337 *(*this + vcall_offset) should be added to THIS. */
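/* Illustrative only: in rough C-like pseudocode the emitted thunk is

	this += DELTA;
	if (VCALL_OFFSET)
	  this += *(long *) (*(char **) this + VCALL_OFFSET);
	tail call FUNCTION (this, remaining arguments unchanged);

   where the casts are only meant to spell out "the word at
   *(*this + vcall_offset)" from the description above.  */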
8339 static void
8340 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8341 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8342 tree function)
8344 rtx this, insn, funexp;
8345 unsigned int this_parmno;
8346 unsigned int this_regno;
8348 reload_completed = 1;
8349 epilogue_completed = 1;
8350 no_new_pseudos = 1;
8351 reset_block_changes ();
8353 /* Set things up as ia64_expand_prologue might. */
8354 last_scratch_gr_reg = 15;
8356 memset (&current_frame_info, 0, sizeof (current_frame_info));
8357 current_frame_info.spill_cfa_off = -16;
8358 current_frame_info.n_input_regs = 1;
8359 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8361 /* Mark the end of the (empty) prologue. */
8362 emit_note (NOTE_INSN_PROLOGUE_END);
8364 /* Figure out whether "this" will be the first parameter (the
8365 typical case) or the second parameter (as happens when the
8366 virtual function returns certain class objects). */
8367 this_parmno
8368 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
8369 ? 1 : 0);
8370 this_regno = IN_REG (this_parmno);
8371 if (!TARGET_REG_NAMES)
8372 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
8374 this = gen_rtx_REG (Pmode, this_regno);
8375 if (TARGET_ILP32)
8377 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
8378 REG_POINTER (tmp) = 1;
8379 if (delta && CONST_OK_FOR_I (delta))
8381 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8382 delta = 0;
8384 else
8385 emit_insn (gen_ptr_extend (this, tmp));
8388 /* Apply the constant offset, if required. */
8389 if (delta)
8391 rtx delta_rtx = GEN_INT (delta);
8393 if (!CONST_OK_FOR_I (delta))
8395 rtx tmp = gen_rtx_REG (Pmode, 2);
8396 emit_move_insn (tmp, delta_rtx);
8397 delta_rtx = tmp;
8399 emit_insn (gen_adddi3 (this, this, delta_rtx));
8402 /* Apply the offset from the vtable, if required. */
8403 if (vcall_offset)
8405 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8406 rtx tmp = gen_rtx_REG (Pmode, 2);
8408 if (TARGET_ILP32)
8410 rtx t = gen_rtx_REG (ptr_mode, 2);
8411 REG_POINTER (t) = 1;
8412 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8413 if (CONST_OK_FOR_I (vcall_offset))
8415 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8416 vcall_offset_rtx));
8417 vcall_offset = 0;
8419 else
8420 emit_insn (gen_ptr_extend (tmp, t));
8422 else
8423 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8425 if (vcall_offset)
8427 if (!CONST_OK_FOR_J (vcall_offset))
8429 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8430 emit_move_insn (tmp2, vcall_offset_rtx);
8431 vcall_offset_rtx = tmp2;
8433 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8436 if (TARGET_ILP32)
8437 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8438 gen_rtx_MEM (ptr_mode, tmp));
8439 else
8440 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8442 emit_insn (gen_adddi3 (this, this, tmp));
8445 /* Generate a tail call to the target function. */
8446 if (! TREE_USED (function))
8448 assemble_external (function);
8449 TREE_USED (function) = 1;
8451 funexp = XEXP (DECL_RTL (function), 0);
8452 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8453 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8454 insn = get_last_insn ();
8455 SIBLING_CALL_P (insn) = 1;
8457 /* Code generation for calls relies on splitting. */
8458 reload_completed = 1;
8459 epilogue_completed = 1;
8460 try_split (PATTERN (insn), insn, 0);
8462 emit_barrier ();
8464 /* Run just enough of rest_of_compilation to get the insns emitted.
8465 There's not really enough bulk here to make other passes such as
8466    instruction scheduling worthwhile.  Note that use_thunk calls
8467 assemble_start_function and assemble_end_function. */
8469 insn_locators_initialize ();
8470 emit_all_insn_group_barriers (NULL);
8471 insn = get_insns ();
8472 shorten_branches (insn);
8473 final_start_function (insn, file, 1);
8474 final (insn, file, 1, 0);
8475 final_end_function ();
8477 reload_completed = 0;
8478 epilogue_completed = 0;
8479 no_new_pseudos = 0;
8482 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
8484 static rtx
8485 ia64_struct_value_rtx (tree fntype,
8486 int incoming ATTRIBUTE_UNUSED)
8488 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
8489 return NULL_RTX;
8490 return gen_rtx_REG (Pmode, GR_REG (8));
8493 static bool
8494 ia64_scalar_mode_supported_p (enum machine_mode mode)
8496 switch (mode)
8498 case QImode:
8499 case HImode:
8500 case SImode:
8501 case DImode:
8502 case TImode:
8503 return true;
8505 case SFmode:
8506 case DFmode:
8507 case XFmode:
8508 return true;
8510 case TFmode:
8511 return TARGET_HPUX;
8513 default:
8514 return false;
8518 #include "gt-ia64.h"