1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "except.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "toplev.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
56 /* This is used for communication between ASM_OUTPUT_LABEL and
57 ASM_OUTPUT_LABELREF. */
58 int ia64_asm_output_label = 0;
60 /* Define the information needed to generate branch and scc insns. This is
61 stored from the compare operation. */
62 struct rtx_def * ia64_compare_op0;
63 struct rtx_def * ia64_compare_op1;
65 /* Register names for ia64_expand_prologue. */
66 static const char * const ia64_reg_numbers[96] =
67 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
68 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
69 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
70 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
71 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
72 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
73 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
74 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
75 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
76 "r104","r105","r106","r107","r108","r109","r110","r111",
77 "r112","r113","r114","r115","r116","r117","r118","r119",
78 "r120","r121","r122","r123","r124","r125","r126","r127"};
80 /* ??? These strings could be shared with REGISTER_NAMES. */
81 static const char * const ia64_input_reg_names[8] =
82 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
84 /* ??? These strings could be shared with REGISTER_NAMES. */
85 static const char * const ia64_local_reg_names[80] =
86 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
87 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
88 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
89 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
90 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
91 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
92 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
93 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
94 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
95 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
97 /* ??? These strings could be shared with REGISTER_NAMES. */
98 static const char * const ia64_output_reg_names[8] =
99 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
101 /* String used with the -mfixed-range= option. */
102 const char *ia64_fixed_range_string;
104 /* Determines whether we use adds, addl, or movl to generate our
105 TLS immediate offsets. */
106 int ia64_tls_size = 22;
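/* For illustration, assuming the usual IA-64 immediate widths: a size of
   14 corresponds to "adds" (14-bit immediate), 22 to "addl" (22-bit
   immediate) and 64 to "movl" (full 64-bit immediate).  With the default
   of 22, a local-exec access might be emitted roughly as
   "addl r2 = @tprel(sym), r13" when the offset fits in 22 bits.  */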
108 /* String used with the -mtls-size= option. */
109 const char *ia64_tls_size_string;
111 /* Which cpu are we scheduling for. */
112 enum processor_type ia64_tune;
 115 /* String used with the -mtune= option.  */
115 const char *ia64_tune_string;
117 /* Determines whether we run our final scheduling pass or not. We always
118 avoid the normal second scheduling pass. */
119 static int ia64_flag_schedule_insns2;
121 /* Determines whether we run variable tracking in machine dependent
122 reorganization. */
123 static int ia64_flag_var_tracking;
125 /* Variables which are this size or smaller are put in the sdata/sbss
126 sections. */
128 unsigned int ia64_section_threshold;
130 /* The following variable is used by the DFA insn scheduler. The value is
131 TRUE if we do insn bundling instead of insn scheduling. */
132 int bundling_p = 0;
134 /* Structure to be filled in by ia64_compute_frame_size with register
135 save masks and offsets for the current function. */
137 struct ia64_frame_info
139 HOST_WIDE_INT total_size; /* size of the stack frame, not including
140 the caller's scratch area. */
141 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
142 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
143 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
144 HARD_REG_SET mask; /* mask of saved registers. */
145 unsigned int gr_used_mask; /* mask of registers in use as gr spill
146 registers or long-term scratches. */
147 int n_spilled; /* number of spilled registers. */
148 int reg_fp; /* register for fp. */
149 int reg_save_b0; /* save register for b0. */
150 int reg_save_pr; /* save register for prs. */
151 int reg_save_ar_pfs; /* save register for ar.pfs. */
152 int reg_save_ar_unat; /* save register for ar.unat. */
153 int reg_save_ar_lc; /* save register for ar.lc. */
154 int reg_save_gp; /* save register for gp. */
155 int n_input_regs; /* number of input registers used. */
156 int n_local_regs; /* number of local registers used. */
157 int n_output_regs; /* number of output registers used. */
158 int n_rotate_regs; /* number of rotating registers used. */
160 char need_regstk; /* true if a .regstk directive needed. */
161 char initialized; /* true if the data is finalized. */
164 /* Current frame information calculated by ia64_compute_frame_size. */
165 static struct ia64_frame_info current_frame_info;
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
173 static rtx gen_tls_get_addr (void);
174 static rtx gen_thread_pointer (void);
175 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
176 static int find_gr_spill (int);
177 static int next_scratch_gr_reg (void);
178 static void mark_reg_gr_used_mask (rtx, void *);
179 static void ia64_compute_frame_size (HOST_WIDE_INT);
180 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
181 static void finish_spill_pointers (void);
182 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
183 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
184 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
185 static rtx gen_movdi_x (rtx, rtx, rtx);
186 static rtx gen_fr_spill_x (rtx, rtx, rtx);
187 static rtx gen_fr_restore_x (rtx, rtx, rtx);
189 static enum machine_mode hfa_element_mode (tree, int);
190 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
191 tree, int *, int);
192 static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
193 tree, bool);
194 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
195 tree, bool);
196 static bool ia64_function_ok_for_sibcall (tree, tree);
197 static bool ia64_return_in_memory (tree, tree);
198 static bool ia64_rtx_costs (rtx, int, int, int *);
199 static void fix_range (const char *);
200 static struct machine_function * ia64_init_machine_status (void);
201 static void emit_insn_group_barriers (FILE *);
202 static void emit_all_insn_group_barriers (FILE *);
203 static void final_emit_insn_group_barriers (FILE *);
204 static void emit_predicate_relation_info (void);
205 static void ia64_reorg (void);
206 static bool ia64_in_small_data_p (tree);
207 static void process_epilogue (void);
208 static int process_set (FILE *, rtx);
210 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
211 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
212 static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
213 int, tree, rtx);
214 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
215 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
216 static bool ia64_assemble_integer (rtx, unsigned int, int);
217 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
218 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
219 static void ia64_output_function_end_prologue (FILE *);
221 static int ia64_issue_rate (void);
222 static int ia64_adjust_cost (rtx, rtx, rtx, int);
223 static void ia64_sched_init (FILE *, int, int);
224 static void ia64_sched_finish (FILE *, int);
225 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
226 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
227 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
228 static int ia64_variable_issue (FILE *, int, rtx, int);
230 static struct bundle_state *get_free_bundle_state (void);
231 static void free_bundle_state (struct bundle_state *);
232 static void initiate_bundle_states (void);
233 static void finish_bundle_states (void);
234 static unsigned bundle_state_hash (const void *);
235 static int bundle_state_eq_p (const void *, const void *);
236 static int insert_bundle_state (struct bundle_state *);
237 static void initiate_bundle_state_table (void);
238 static void finish_bundle_state_table (void);
239 static int try_issue_nops (struct bundle_state *, int);
240 static int try_issue_insn (struct bundle_state *, rtx);
241 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
242 static int get_max_pos (state_t);
243 static int get_template (state_t, int);
245 static rtx get_next_important_insn (rtx, rtx);
246 static void bundling (FILE *, int, rtx, rtx);
248 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
249 HOST_WIDE_INT, tree);
250 static void ia64_file_start (void);
252 static void ia64_select_rtx_section (enum machine_mode, rtx,
253 unsigned HOST_WIDE_INT);
254 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
255 ATTRIBUTE_UNUSED;
256 static void ia64_rwreloc_unique_section (tree, int)
257 ATTRIBUTE_UNUSED;
258 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
259 unsigned HOST_WIDE_INT)
260 ATTRIBUTE_UNUSED;
261 static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
262 ATTRIBUTE_UNUSED;
264 static void ia64_hpux_add_extern_decl (tree decl)
265 ATTRIBUTE_UNUSED;
266 static void ia64_hpux_file_end (void)
267 ATTRIBUTE_UNUSED;
268 static void ia64_init_libfuncs (void)
269 ATTRIBUTE_UNUSED;
270 static void ia64_hpux_init_libfuncs (void)
271 ATTRIBUTE_UNUSED;
272 static void ia64_sysv4_init_libfuncs (void)
273 ATTRIBUTE_UNUSED;
274 static void ia64_vms_init_libfuncs (void)
275 ATTRIBUTE_UNUSED;
277 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
278 static void ia64_encode_section_info (tree, rtx, int);
279 static rtx ia64_struct_value_rtx (tree, int);
280 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
281 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
284 /* Table of valid machine attributes. */
285 static const struct attribute_spec ia64_attribute_table[] =
287 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
288 { "syscall_linkage", 0, 0, false, true, true, NULL },
289 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
290 { NULL, 0, 0, false, false, false, NULL }
293 /* Initialize the GCC target structure. */
294 #undef TARGET_ATTRIBUTE_TABLE
295 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
297 #undef TARGET_INIT_BUILTINS
298 #define TARGET_INIT_BUILTINS ia64_init_builtins
300 #undef TARGET_EXPAND_BUILTIN
301 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
303 #undef TARGET_ASM_BYTE_OP
304 #define TARGET_ASM_BYTE_OP "\tdata1\t"
305 #undef TARGET_ASM_ALIGNED_HI_OP
306 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
307 #undef TARGET_ASM_ALIGNED_SI_OP
308 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
309 #undef TARGET_ASM_ALIGNED_DI_OP
310 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
311 #undef TARGET_ASM_UNALIGNED_HI_OP
312 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
313 #undef TARGET_ASM_UNALIGNED_SI_OP
314 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
315 #undef TARGET_ASM_UNALIGNED_DI_OP
316 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
317 #undef TARGET_ASM_INTEGER
318 #define TARGET_ASM_INTEGER ia64_assemble_integer
320 #undef TARGET_ASM_FUNCTION_PROLOGUE
321 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
322 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
323 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
324 #undef TARGET_ASM_FUNCTION_EPILOGUE
325 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
327 #undef TARGET_IN_SMALL_DATA_P
328 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
330 #undef TARGET_SCHED_ADJUST_COST
331 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
332 #undef TARGET_SCHED_ISSUE_RATE
333 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
334 #undef TARGET_SCHED_VARIABLE_ISSUE
335 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
336 #undef TARGET_SCHED_INIT
337 #define TARGET_SCHED_INIT ia64_sched_init
338 #undef TARGET_SCHED_FINISH
339 #define TARGET_SCHED_FINISH ia64_sched_finish
340 #undef TARGET_SCHED_REORDER
341 #define TARGET_SCHED_REORDER ia64_sched_reorder
342 #undef TARGET_SCHED_REORDER2
343 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
345 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
346 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
348 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
349 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
351 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
352 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
353 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
354 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
356 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
357 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
358 ia64_first_cycle_multipass_dfa_lookahead_guard
360 #undef TARGET_SCHED_DFA_NEW_CYCLE
361 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
363 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
364 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
365 #undef TARGET_PASS_BY_REFERENCE
366 #define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference
367 #undef TARGET_ARG_PARTIAL_BYTES
368 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
370 #undef TARGET_ASM_OUTPUT_MI_THUNK
371 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
372 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
373 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
375 #undef TARGET_ASM_FILE_START
376 #define TARGET_ASM_FILE_START ia64_file_start
378 #undef TARGET_RTX_COSTS
379 #define TARGET_RTX_COSTS ia64_rtx_costs
380 #undef TARGET_ADDRESS_COST
381 #define TARGET_ADDRESS_COST hook_int_rtx_0
383 #undef TARGET_MACHINE_DEPENDENT_REORG
384 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
386 #undef TARGET_ENCODE_SECTION_INFO
387 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
389 /* ??? ABI doesn't allow us to define this. */
390 #if 0
391 #undef TARGET_PROMOTE_FUNCTION_ARGS
392 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
393 #endif
395 /* ??? ABI doesn't allow us to define this. */
396 #if 0
397 #undef TARGET_PROMOTE_FUNCTION_RETURN
398 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
399 #endif
401 /* ??? Investigate. */
402 #if 0
403 #undef TARGET_PROMOTE_PROTOTYPES
404 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
405 #endif
407 #undef TARGET_STRUCT_VALUE_RTX
408 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
409 #undef TARGET_RETURN_IN_MEMORY
410 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
411 #undef TARGET_SETUP_INCOMING_VARARGS
412 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
413 #undef TARGET_STRICT_ARGUMENT_NAMING
414 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
415 #undef TARGET_MUST_PASS_IN_STACK
416 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
418 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
419 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
421 #undef TARGET_UNWIND_EMIT
422 #define TARGET_UNWIND_EMIT process_for_unwind_directive
424 #undef TARGET_SCALAR_MODE_SUPPORTED_P
425 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
427 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
428 in an order different from the specified program order. */
429 #undef TARGET_RELAXED_ORDERING
430 #define TARGET_RELAXED_ORDERING true
432 struct gcc_target targetm = TARGET_INITIALIZER;
434 typedef enum
436 ADDR_AREA_NORMAL, /* normal address area */
437 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
439 ia64_addr_area;
441 static GTY(()) tree small_ident1;
442 static GTY(()) tree small_ident2;
444 static void
445 init_idents (void)
447 if (small_ident1 == 0)
449 small_ident1 = get_identifier ("small");
450 small_ident2 = get_identifier ("__small__");
454 /* Retrieve the address area that has been chosen for the given decl. */
456 static ia64_addr_area
457 ia64_get_addr_area (tree decl)
459 tree model_attr;
461 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
462 if (model_attr)
464 tree id;
466 init_idents ();
467 id = TREE_VALUE (TREE_VALUE (model_attr));
468 if (id == small_ident1 || id == small_ident2)
469 return ADDR_AREA_SMALL;
471 return ADDR_AREA_NORMAL;
474 static tree
475 ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
477 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
478 ia64_addr_area area;
479 tree arg, decl = *node;
481 init_idents ();
482 arg = TREE_VALUE (args);
483 if (arg == small_ident1 || arg == small_ident2)
485 addr_area = ADDR_AREA_SMALL;
487 else
489 warning ("invalid argument of %qs attribute",
490 IDENTIFIER_POINTER (name));
491 *no_add_attrs = true;
494 switch (TREE_CODE (decl))
496 case VAR_DECL:
497 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
498 == FUNCTION_DECL)
499 && !TREE_STATIC (decl))
501 error ("%Jan address area attribute cannot be specified for "
502 "local variables", decl, decl);
503 *no_add_attrs = true;
505 area = ia64_get_addr_area (decl);
506 if (area != ADDR_AREA_NORMAL && addr_area != area)
508 error ("%Jaddress area of '%s' conflicts with previous "
509 "declaration", decl, decl);
510 *no_add_attrs = true;
512 break;
514 case FUNCTION_DECL:
515 error ("%Jaddress area attribute cannot be specified for functions",
516 decl, decl);
517 *no_add_attrs = true;
518 break;
520 default:
521 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
522 *no_add_attrs = true;
523 break;
526 return NULL_TREE;
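/* A minimal usage sketch of the attribute handled above; the declaration
   below is hypothetical, and the argument is compared as an identifier,
   so it is written without quotes.  */
#if 0
static int counter __attribute__ ((model (small)));
#endif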
529 static void
530 ia64_encode_addr_area (tree decl, rtx symbol)
532 int flags;
534 flags = SYMBOL_REF_FLAGS (symbol);
535 switch (ia64_get_addr_area (decl))
537 case ADDR_AREA_NORMAL: break;
538 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
539 default: abort ();
541 SYMBOL_REF_FLAGS (symbol) = flags;
544 static void
545 ia64_encode_section_info (tree decl, rtx rtl, int first)
547 default_encode_section_info (decl, rtl, first);
549 /* Careful not to prod global register variables. */
550 if (TREE_CODE (decl) == VAR_DECL
551 && GET_CODE (DECL_RTL (decl)) == MEM
552 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
553 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
554 ia64_encode_addr_area (decl, XEXP (rtl, 0));
557 /* Return 1 if the operands of a move are ok. */
560 ia64_move_ok (rtx dst, rtx src)
562 /* If we're under init_recog_no_volatile, we'll not be able to use
563 memory_operand. So check the code directly and don't worry about
564 the validity of the underlying address, which should have been
565 checked elsewhere anyway. */
566 if (GET_CODE (dst) != MEM)
567 return 1;
568 if (GET_CODE (src) == MEM)
569 return 0;
570 if (register_operand (src, VOIDmode))
571 return 1;
 573   /* Otherwise, this must be a constant, and that constant must be either 0, 0.0 or 1.0.  */
574 if (INTEGRAL_MODE_P (GET_MODE (dst)))
575 return src == const0_rtx;
576 else
577 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
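/* For example: storing the integer constant 5 directly to memory is
   rejected by the test above and must go through a register first, while
   storing zero is allowed because IA-64 always has r0 == 0 (and f0 == 0.0,
   f1 == 1.0 for the floating-point constants accepted by
   CONST_DOUBLE_OK_FOR_G).  */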
581 addp4_optimize_ok (rtx op1, rtx op2)
583 return (basereg_operand (op1, GET_MODE(op1)) !=
584 basereg_operand (op2, GET_MODE(op2)));
587 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
588 Return the length of the field, or <= 0 on failure. */
591 ia64_depz_field_mask (rtx rop, rtx rshift)
593 unsigned HOST_WIDE_INT op = INTVAL (rop);
594 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
596 /* Get rid of the zero bits we're shifting in. */
597 op >>= shift;
599 /* We must now have a solid block of 1's at bit 0. */
600 return exact_log2 (op + 1);
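/* Worked example: with OP = 0xff00 and SHIFT = 8, the shifted value is
   0xff, a solid block of ones at bit 0, so exact_log2 (0xff + 1) yields a
   field length of 8.  With OP = 0xf0f0 and SHIFT = 4 the shifted value
   0xf0f is not of the form 2^n - 1 and exact_log2 returns a negative
   value, signalling failure.  */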
603 /* Expand a symbolic constant load. */
605 void
606 ia64_expand_load_address (rtx dest, rtx src)
608 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (src))
609 abort ();
610 if (GET_CODE (dest) != REG)
611 abort ();
 613   /* ILP32 mode still loads 64 bits of data from the GOT.  This avoids
614 having to pointer-extend the value afterward. Other forms of address
615 computation below are also more natural to compute as 64-bit quantities.
616 If we've been given an SImode destination register, change it. */
617 if (GET_MODE (dest) != Pmode)
618 dest = gen_rtx_REG (Pmode, REGNO (dest));
620 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
622 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
623 return;
625 else if (TARGET_AUTO_PIC)
627 emit_insn (gen_load_gprel64 (dest, src));
628 return;
630 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
632 emit_insn (gen_load_fptr (dest, src));
633 return;
635 else if (sdata_symbolic_operand (src, VOIDmode))
637 emit_insn (gen_load_gprel (dest, src));
638 return;
641 if (GET_CODE (src) == CONST
642 && GET_CODE (XEXP (src, 0)) == PLUS
643 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
644 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x3fff) != 0)
646 rtx sym = XEXP (XEXP (src, 0), 0);
647 HOST_WIDE_INT ofs, hi, lo;
649 /* Split the offset into a sign extended 14-bit low part
650 and a complementary high part. */
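      /* Worked example: for ofs = 0x12345 the low 14 bits are 0x2345;
	 xoring with 0x2000 gives 0x0345 and subtracting 0x2000 yields
	 lo = -0x1cbb, which fits the sign-extended 14-bit range, while
	 hi = ofs - lo = 0x14000 is a multiple of 0x4000.  Adding hi and
	 lo reconstructs the original offset exactly.  */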
651 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
652 lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
653 hi = ofs - lo;
655 ia64_expand_load_address (dest, plus_constant (sym, hi));
656 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
658 else
660 rtx tmp;
662 tmp = gen_rtx_HIGH (Pmode, src);
663 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
664 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
666 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
667 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
671 static GTY(()) rtx gen_tls_tga;
672 static rtx
673 gen_tls_get_addr (void)
675 if (!gen_tls_tga)
676 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
677 return gen_tls_tga;
680 static GTY(()) rtx thread_pointer_rtx;
681 static rtx
682 gen_thread_pointer (void)
684 if (!thread_pointer_rtx)
685 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
686 return thread_pointer_rtx;
689 static rtx
690 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
692 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
693 rtx orig_op0 = op0;
695 switch (tls_kind)
697 case TLS_MODEL_GLOBAL_DYNAMIC:
698 start_sequence ();
700 tga_op1 = gen_reg_rtx (Pmode);
701 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
702 tga_op1 = gen_const_mem (Pmode, tga_op1);
704 tga_op2 = gen_reg_rtx (Pmode);
705 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
706 tga_op2 = gen_const_mem (Pmode, tga_op2);
708 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
709 LCT_CONST, Pmode, 2, tga_op1,
710 Pmode, tga_op2, Pmode);
712 insns = get_insns ();
713 end_sequence ();
715 if (GET_MODE (op0) != Pmode)
716 op0 = tga_ret;
717 emit_libcall_block (insns, op0, tga_ret, op1);
718 break;
720 case TLS_MODEL_LOCAL_DYNAMIC:
 721       /* ??? This isn't the completely proper way to do local-dynamic.
722 If the call to __tls_get_addr is used only by a single symbol,
723 then we should (somehow) move the dtprel to the second arg
724 to avoid the extra add. */
725 start_sequence ();
727 tga_op1 = gen_reg_rtx (Pmode);
728 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
729 tga_op1 = gen_const_mem (Pmode, tga_op1);
731 tga_op2 = const0_rtx;
733 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
734 LCT_CONST, Pmode, 2, tga_op1,
735 Pmode, tga_op2, Pmode);
737 insns = get_insns ();
738 end_sequence ();
740 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
741 UNSPEC_LD_BASE);
742 tmp = gen_reg_rtx (Pmode);
743 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
745 if (!register_operand (op0, Pmode))
746 op0 = gen_reg_rtx (Pmode);
747 if (TARGET_TLS64)
749 emit_insn (gen_load_dtprel (op0, op1));
750 emit_insn (gen_adddi3 (op0, tmp, op0));
752 else
753 emit_insn (gen_add_dtprel (op0, tmp, op1));
754 break;
756 case TLS_MODEL_INITIAL_EXEC:
757 tmp = gen_reg_rtx (Pmode);
758 emit_insn (gen_load_ltoff_tprel (tmp, op1));
759 tmp = gen_const_mem (Pmode, tmp);
760 tmp = force_reg (Pmode, tmp);
762 if (!register_operand (op0, Pmode))
763 op0 = gen_reg_rtx (Pmode);
764 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
765 break;
767 case TLS_MODEL_LOCAL_EXEC:
768 if (!register_operand (op0, Pmode))
769 op0 = gen_reg_rtx (Pmode);
770 if (TARGET_TLS64)
772 emit_insn (gen_load_tprel (op0, op1));
773 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
775 else
776 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
777 break;
779 default:
780 abort ();
783 if (orig_op0 == op0)
784 return NULL_RTX;
785 if (GET_MODE (orig_op0) == Pmode)
786 return op0;
787 return gen_lowpart (GET_MODE (orig_op0), op0);
791 ia64_expand_move (rtx op0, rtx op1)
793 enum machine_mode mode = GET_MODE (op0);
795 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
796 op1 = force_reg (mode, op1);
798 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
800 enum tls_model tls_kind;
801 if (GET_CODE (op1) == SYMBOL_REF
802 && (tls_kind = SYMBOL_REF_TLS_MODEL (op1)))
803 return ia64_expand_tls_address (tls_kind, op0, op1);
805 if (!TARGET_NO_PIC && reload_completed)
807 ia64_expand_load_address (op0, op1);
808 return NULL_RTX;
812 return op1;
815 /* Split a move from OP1 to OP0 conditional on COND. */
817 void
818 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
820 rtx insn, first = get_last_insn ();
822 emit_move_insn (op0, op1);
824 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
825 if (INSN_P (insn))
826 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
827 PATTERN (insn));
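/* For illustration (hypothetical operands): a move of r15 into r14 split
   here under a predicate held in p6 ends up wrapped as
   (cond_exec (ne (reg:BI p6) (const_int 0)) (set (reg r14) (reg r15))),
   which is ultimately printed as the predicated "(p6) mov r14 = r15".  */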
830 /* Split a post-reload TImode or TFmode reference into two DImode
831 components. This is made extra difficult by the fact that we do
832 not get any scratch registers to work with, because reload cannot
833 be prevented from giving us a scratch that overlaps the register
834 pair involved. So instead, when addressing memory, we tweak the
835 pointer register up and back down with POST_INCs. Or up and not
836 back down when we can get away with it.
838 REVERSED is true when the loads must be done in reversed order
839 (high word first) for correctness. DEAD is true when the pointer
840 dies with the second insn we generate and therefore the second
841 address must not carry a postmodify.
843 May return an insn which is to be emitted after the moves. */
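/* For illustration (hypothetical registers): a TImode load whose address
   is the plain register r14 is split into two DImode loads along the
   lines of "ld8 rA = [r14], 8" followed by "ld8 rB = [r14], -8"; the
   first post-increment bumps the pointer up and the second restores it,
   unless DEAD lets the second adjustment be dropped.  */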
845 static rtx
846 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
848 rtx fixup = 0;
850 switch (GET_CODE (in))
852 case REG:
853 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
854 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
855 break;
857 case CONST_INT:
858 case CONST_DOUBLE:
859 /* Cannot occur reversed. */
860 if (reversed) abort ();
862 if (GET_MODE (in) != TFmode)
863 split_double (in, &out[0], &out[1]);
864 else
865 /* split_double does not understand how to split a TFmode
866 quantity into a pair of DImode constants. */
868 REAL_VALUE_TYPE r;
869 unsigned HOST_WIDE_INT p[2];
870 long l[4]; /* TFmode is 128 bits */
872 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
873 real_to_target (l, &r, TFmode);
875 if (FLOAT_WORDS_BIG_ENDIAN)
877 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
878 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
880 else
882 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
883 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
885 out[0] = GEN_INT (p[0]);
886 out[1] = GEN_INT (p[1]);
888 break;
890 case MEM:
892 rtx base = XEXP (in, 0);
893 rtx offset;
895 switch (GET_CODE (base))
897 case REG:
898 if (!reversed)
900 out[0] = adjust_automodify_address
901 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
902 out[1] = adjust_automodify_address
903 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
905 else
907 /* Reversal requires a pre-increment, which can only
908 be done as a separate insn. */
909 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
910 out[0] = adjust_automodify_address
911 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
912 out[1] = adjust_address (in, DImode, 0);
914 break;
916 case POST_INC:
917 if (reversed || dead) abort ();
918 /* Just do the increment in two steps. */
919 out[0] = adjust_automodify_address (in, DImode, 0, 0);
920 out[1] = adjust_automodify_address (in, DImode, 0, 8);
921 break;
923 case POST_DEC:
924 if (reversed || dead) abort ();
925 /* Add 8, subtract 24. */
926 base = XEXP (base, 0);
927 out[0] = adjust_automodify_address
928 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
929 out[1] = adjust_automodify_address
930 (in, DImode,
931 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
933 break;
935 case POST_MODIFY:
936 if (reversed || dead) abort ();
937 /* Extract and adjust the modification. This case is
938 trickier than the others, because we might have an
939 index register, or we might have a combined offset that
940 doesn't fit a signed 9-bit displacement field. We can
941 assume the incoming expression is already legitimate. */
942 offset = XEXP (base, 1);
943 base = XEXP (base, 0);
945 out[0] = adjust_automodify_address
946 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
948 if (GET_CODE (XEXP (offset, 1)) == REG)
950 /* Can't adjust the postmodify to match. Emit the
951 original, then a separate addition insn. */
952 out[1] = adjust_automodify_address (in, DImode, 0, 8);
953 fixup = gen_adddi3 (base, base, GEN_INT (-8));
955 else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
956 abort ();
957 else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
959 /* Again the postmodify cannot be made to match, but
960 in this case it's more efficient to get rid of the
961 postmodify entirely and fix up with an add insn. */
962 out[1] = adjust_automodify_address (in, DImode, base, 8);
963 fixup = gen_adddi3 (base, base,
964 GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
966 else
968 /* Combined offset still fits in the displacement field.
969 (We cannot overflow it at the high end.) */
970 out[1] = adjust_automodify_address
971 (in, DImode,
972 gen_rtx_POST_MODIFY (Pmode, base,
973 gen_rtx_PLUS (Pmode, base,
974 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
977 break;
979 default:
980 abort ();
982 break;
985 default:
986 abort ();
989 return fixup;
992 /* Split a TImode or TFmode move instruction after reload.
993 This is used by *movtf_internal and *movti_internal. */
994 void
995 ia64_split_tmode_move (rtx operands[])
997 rtx in[2], out[2], insn;
998 rtx fixup[2];
999 bool dead = false;
1000 bool reversed = false;
1002 /* It is possible for reload to decide to overwrite a pointer with
1003 the value it points to. In that case we have to do the loads in
1004 the appropriate order so that the pointer is not destroyed too
1005 early. Also we must not generate a postmodify for that second
1006 load, or rws_access_regno will abort. */
1007 if (GET_CODE (operands[1]) == MEM
1008 && reg_overlap_mentioned_p (operands[0], operands[1]))
1010 rtx base = XEXP (operands[1], 0);
1011 while (GET_CODE (base) != REG)
1012 base = XEXP (base, 0);
1014 if (REGNO (base) == REGNO (operands[0]))
1015 reversed = true;
1016 dead = true;
1018 /* Another reason to do the moves in reversed order is if the first
1019 element of the target register pair is also the second element of
1020 the source register pair. */
1021 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1022 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1023 reversed = true;
1025 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1026 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1028 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1029 if (GET_CODE (EXP) == MEM \
1030 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1031 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1032 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1033 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1034 XEXP (XEXP (EXP, 0), 0), \
1035 REG_NOTES (INSN))
1037 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1038 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1039 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1041 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1042 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1043 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1045 if (fixup[0])
1046 emit_insn (fixup[0]);
1047 if (fixup[1])
1048 emit_insn (fixup[1]);
1050 #undef MAYBE_ADD_REG_INC_NOTE
1053 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1054 through memory plus an extra GR scratch register. Except that you can
1055 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1056 SECONDARY_RELOAD_CLASS, but not both.
1058 We got into problems in the first place by allowing a construct like
1059 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1060 This solution attempts to prevent this situation from occurring. When
1061 we see something like the above, we spill the inner register to memory. */
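/* A minimal sketch (hypothetical types) of the kind of union described
   above; reading the long double member of such a TImode-sized union is
   what can surface as (subreg:XF (reg:TI)).  */
#if 0
union xf_in_ti
{
  long double ld;	/* XFmode member */
  __int128_t ti;	/* TImode-sized integer member */
};
#endif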
1064 spill_xfmode_operand (rtx in, int force)
1066 if (GET_CODE (in) == SUBREG
1067 && GET_MODE (SUBREG_REG (in)) == TImode
1068 && GET_CODE (SUBREG_REG (in)) == REG)
1070 rtx memt = assign_stack_temp (TImode, 16, 0);
1071 emit_move_insn (memt, SUBREG_REG (in));
1072 return adjust_address (memt, XFmode, 0);
1074 else if (force && GET_CODE (in) == REG)
1076 rtx memx = assign_stack_temp (XFmode, 16, 0);
1077 emit_move_insn (memx, in);
1078 return memx;
1080 else
1081 return in;
1084 /* Emit comparison instruction if necessary, returning the expression
1085 that holds the compare result in the proper mode. */
1087 static GTY(()) rtx cmptf_libfunc;
1090 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1092 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1093 rtx cmp;
1095 /* If we have a BImode input, then we already have a compare result, and
1096 do not need to emit another comparison. */
1097 if (GET_MODE (op0) == BImode)
1099 if ((code == NE || code == EQ) && op1 == const0_rtx)
1100 cmp = op0;
1101 else
1102 abort ();
1104 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1105      magic number as its third argument, which indicates what to do.
1106 The return value is an integer to be compared against zero. */
1107 else if (GET_MODE (op0) == TFmode)
1109 enum qfcmp_magic {
1110 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1111 QCMP_UNORD = 2,
1112 QCMP_EQ = 4,
1113 QCMP_LT = 8,
1114 QCMP_GT = 16
1115 } magic;
1116 enum rtx_code ncode;
1117 rtx ret, insns;
1118 if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
1119 abort ();
1120 switch (code)
1122 /* 1 = equal, 0 = not equal. Equality operators do
1123 not raise FP_INVALID when given an SNaN operand. */
1124 case EQ: magic = QCMP_EQ; ncode = NE; break;
1125 case NE: magic = QCMP_EQ; ncode = EQ; break;
1126 /* isunordered() from C99. */
1127 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1128 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1129 /* Relational operators raise FP_INVALID when given
1130 an SNaN operand. */
1131 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1132 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1133 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1134 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1135 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1136	     Expanders for buneq etc. would have to be added to ia64.md
1137 for this to be useful. */
1138 default: abort ();
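      /* For illustration: a LE comparison is lowered to a call of roughly
	 the form _U_Qfcmp (op0, op1, 13), where 13 is
	 QCMP_LT | QCMP_EQ | QCMP_INV, and the DImode return value is then
	 tested against zero with NE.  */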
1141 start_sequence ();
1143 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1144 op0, TFmode, op1, TFmode,
1145 GEN_INT (magic), DImode);
1146 cmp = gen_reg_rtx (BImode);
1147 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1148 gen_rtx_fmt_ee (ncode, BImode,
1149 ret, const0_rtx)));
1151 insns = get_insns ();
1152 end_sequence ();
1154 emit_libcall_block (insns, cmp, cmp,
1155 gen_rtx_fmt_ee (code, BImode, op0, op1));
1156 code = NE;
1158 else
1160 cmp = gen_reg_rtx (BImode);
1161 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1162 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1163 code = NE;
1166 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1169 /* Emit the appropriate sequence for a call. */
1171 void
1172 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1173 int sibcall_p)
1175 rtx insn, b0;
1177 addr = XEXP (addr, 0);
1178 addr = convert_memory_address (DImode, addr);
1179 b0 = gen_rtx_REG (DImode, R_BR (0));
1181 /* ??? Should do this for functions known to bind local too. */
1182 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1184 if (sibcall_p)
1185 insn = gen_sibcall_nogp (addr);
1186 else if (! retval)
1187 insn = gen_call_nogp (addr, b0);
1188 else
1189 insn = gen_call_value_nogp (retval, addr, b0);
1190 insn = emit_call_insn (insn);
1192 else
1194 if (sibcall_p)
1195 insn = gen_sibcall_gp (addr);
1196 else if (! retval)
1197 insn = gen_call_gp (addr, b0);
1198 else
1199 insn = gen_call_value_gp (retval, addr, b0);
1200 insn = emit_call_insn (insn);
1202 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1205 if (sibcall_p)
1206 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1209 void
1210 ia64_reload_gp (void)
1212 rtx tmp;
1214 if (current_frame_info.reg_save_gp)
1215 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1216 else
1218 HOST_WIDE_INT offset;
1220 offset = (current_frame_info.spill_cfa_off
1221 + current_frame_info.spill_size);
1222 if (frame_pointer_needed)
1224 tmp = hard_frame_pointer_rtx;
1225 offset = -offset;
1227 else
1229 tmp = stack_pointer_rtx;
1230 offset = current_frame_info.total_size - offset;
1233 if (CONST_OK_FOR_I (offset))
1234 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1235 tmp, GEN_INT (offset)));
1236 else
1238 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1239 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1240 pic_offset_table_rtx, tmp));
1243 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1246 emit_move_insn (pic_offset_table_rtx, tmp);
1249 void
1250 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1251 rtx scratch_b, int noreturn_p, int sibcall_p)
1253 rtx insn;
1254 bool is_desc = false;
1256 /* If we find we're calling through a register, then we're actually
1257 calling through a descriptor, so load up the values. */
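  /* For illustration: an IA-64 function descriptor is a pair of 8-byte
     words, the entry point address at [addr] and the callee's gp at
     [addr + 8]; the post-incremented loads below pick up both halves.  */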
1258 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1260 rtx tmp;
1261 bool addr_dead_p;
1263 /* ??? We are currently constrained to *not* use peep2, because
1264 we can legitimately change the global lifetime of the GP
1265 (in the form of killing where previously live). This is
1266 because a call through a descriptor doesn't use the previous
1267 value of the GP, while a direct call does, and we do not
1268 commit to either form until the split here.
1270 That said, this means that we lack precise life info for
1271 whether ADDR is dead after this call. This is not terribly
1272 important, since we can fix things up essentially for free
1273 with the POST_DEC below, but it's nice to not use it when we
1274 can immediately tell it's not necessary. */
1275 addr_dead_p = ((noreturn_p || sibcall_p
1276 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1277 REGNO (addr)))
1278 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1280 /* Load the code address into scratch_b. */
1281 tmp = gen_rtx_POST_INC (Pmode, addr);
1282 tmp = gen_rtx_MEM (Pmode, tmp);
1283 emit_move_insn (scratch_r, tmp);
1284 emit_move_insn (scratch_b, scratch_r);
1286 /* Load the GP address. If ADDR is not dead here, then we must
1287 revert the change made above via the POST_INCREMENT. */
1288 if (!addr_dead_p)
1289 tmp = gen_rtx_POST_DEC (Pmode, addr);
1290 else
1291 tmp = addr;
1292 tmp = gen_rtx_MEM (Pmode, tmp);
1293 emit_move_insn (pic_offset_table_rtx, tmp);
1295 is_desc = true;
1296 addr = scratch_b;
1299 if (sibcall_p)
1300 insn = gen_sibcall_nogp (addr);
1301 else if (retval)
1302 insn = gen_call_value_nogp (retval, addr, retaddr);
1303 else
1304 insn = gen_call_nogp (addr, retaddr);
1305 emit_call_insn (insn);
1307 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1308 ia64_reload_gp ();
1311 /* Begin the assembly file. */
1313 static void
1314 ia64_file_start (void)
1316 default_file_start ();
1317 emit_safe_across_calls ();
1320 void
1321 emit_safe_across_calls (void)
1323 unsigned int rs, re;
1324 int out_state;
1326 rs = 1;
1327 out_state = 0;
1328 while (1)
1330 while (rs < 64 && call_used_regs[PR_REG (rs)])
1331 rs++;
1332 if (rs >= 64)
1333 break;
1334 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1335 continue;
1336 if (out_state == 0)
1338 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1339 out_state = 1;
1341 else
1342 fputc (',', asm_out_file);
1343 if (re == rs + 1)
1344 fprintf (asm_out_file, "p%u", rs);
1345 else
1346 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1347 rs = re + 1;
1349 if (out_state)
1350 fputc ('\n', asm_out_file);
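/* With the usual IA-64 conventions, where p1-p5 and p16-p63 are
   call-saved, the loop above prints a single directive of the form
	.pred.safe_across_calls p1-p5,p16-p63
   (illustrative output; the exact ranges follow call_used_regs).  */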
1353 /* Helper function for ia64_compute_frame_size: find an appropriate general
1354 register to spill some special register to. SPECIAL_SPILL_MASK contains
1355 bits in GR0 to GR31 that have already been allocated by this routine.
1356 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1358 static int
1359 find_gr_spill (int try_locals)
1361 int regno;
1363 /* If this is a leaf function, first try an otherwise unused
1364 call-clobbered register. */
1365 if (current_function_is_leaf)
1367 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1368 if (! regs_ever_live[regno]
1369 && call_used_regs[regno]
1370 && ! fixed_regs[regno]
1371 && ! global_regs[regno]
1372 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1374 current_frame_info.gr_used_mask |= 1 << regno;
1375 return regno;
1379 if (try_locals)
1381 regno = current_frame_info.n_local_regs;
1382 /* If there is a frame pointer, then we can't use loc79, because
1383 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1384 reg_name switching code in ia64_expand_prologue. */
1385 if (regno < (80 - frame_pointer_needed))
1387 current_frame_info.n_local_regs = regno + 1;
1388 return LOC_REG (0) + regno;
1392 /* Failed to find a general register to spill to. Must use stack. */
1393 return 0;
1396 /* In order to make for nice schedules, we try to allocate every temporary
1397 to a different register. We must of course stay away from call-saved,
1398 fixed, and global registers. We must also stay away from registers
1399 allocated in current_frame_info.gr_used_mask, since those include regs
1400 used all through the prologue.
1402 Any register allocated here must be used immediately. The idea is to
1403 aid scheduling, not to solve data flow problems. */
1405 static int last_scratch_gr_reg;
1407 static int
1408 next_scratch_gr_reg (void)
1410 int i, regno;
1412 for (i = 0; i < 32; ++i)
1414 regno = (last_scratch_gr_reg + i + 1) & 31;
1415 if (call_used_regs[regno]
1416 && ! fixed_regs[regno]
1417 && ! global_regs[regno]
1418 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1420 last_scratch_gr_reg = regno;
1421 return regno;
1425 /* There must be _something_ available. */
1426 abort ();
1429 /* Helper function for ia64_compute_frame_size, called through
1430 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
1432 static void
1433 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
1435 unsigned int regno = REGNO (reg);
1436 if (regno < 32)
1438 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1439 for (i = 0; i < n; ++i)
1440 current_frame_info.gr_used_mask |= 1 << (regno + i);
1444 /* Compute the size and layout of the stack frame for the current function,
1445    recording the results in current_frame_info.  SIZE is the number of bytes
1446    of space needed for local variables.  */
1448 static void
1449 ia64_compute_frame_size (HOST_WIDE_INT size)
1451 HOST_WIDE_INT total_size;
1452 HOST_WIDE_INT spill_size = 0;
1453 HOST_WIDE_INT extra_spill_size = 0;
1454 HOST_WIDE_INT pretend_args_size;
1455 HARD_REG_SET mask;
1456 int n_spilled = 0;
1457 int spilled_gr_p = 0;
1458 int spilled_fr_p = 0;
1459 unsigned int regno;
1460 int i;
1462 if (current_frame_info.initialized)
1463 return;
1465 memset (&current_frame_info, 0, sizeof current_frame_info);
1466 CLEAR_HARD_REG_SET (mask);
1468 /* Don't allocate scratches to the return register. */
1469 diddle_return_value (mark_reg_gr_used_mask, NULL);
1471 /* Don't allocate scratches to the EH scratch registers. */
1472 if (cfun->machine->ia64_eh_epilogue_sp)
1473 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
1474 if (cfun->machine->ia64_eh_epilogue_bsp)
1475 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
1477 /* Find the size of the register stack frame. We have only 80 local
1478 registers, because we reserve 8 for the inputs and 8 for the
1479 outputs. */
1481 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
1482 since we'll be adjusting that down later. */
1483 regno = LOC_REG (78) + ! frame_pointer_needed;
1484 for (; regno >= LOC_REG (0); regno--)
1485 if (regs_ever_live[regno])
1486 break;
1487 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
1489 /* For functions marked with the syscall_linkage attribute, we must mark
1490 all eight input registers as in use, so that locals aren't visible to
1491 the caller. */
1493 if (cfun->machine->n_varargs > 0
1494 || lookup_attribute ("syscall_linkage",
1495 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
1496 current_frame_info.n_input_regs = 8;
1497 else
1499 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
1500 if (regs_ever_live[regno])
1501 break;
1502 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
1505 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
1506 if (regs_ever_live[regno])
1507 break;
1508 i = regno - OUT_REG (0) + 1;
1510 /* When -p profiling, we need one output register for the mcount argument.
1511 Likewise for -a profiling for the bb_init_func argument. For -ax
1512 profiling, we need two output registers for the two bb_init_trace_func
1513 arguments. */
1514 if (current_function_profile)
1515 i = MAX (i, 1);
1516 current_frame_info.n_output_regs = i;
1518 /* ??? No rotating register support yet. */
1519 current_frame_info.n_rotate_regs = 0;
1521 /* Discover which registers need spilling, and how much room that
1522 will take. Begin with floating point and general registers,
1523 which will always wind up on the stack. */
1525 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
1526 if (regs_ever_live[regno] && ! call_used_regs[regno])
1528 SET_HARD_REG_BIT (mask, regno);
1529 spill_size += 16;
1530 n_spilled += 1;
1531 spilled_fr_p = 1;
1534 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1535 if (regs_ever_live[regno] && ! call_used_regs[regno])
1537 SET_HARD_REG_BIT (mask, regno);
1538 spill_size += 8;
1539 n_spilled += 1;
1540 spilled_gr_p = 1;
1543 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
1544 if (regs_ever_live[regno] && ! call_used_regs[regno])
1546 SET_HARD_REG_BIT (mask, regno);
1547 spill_size += 8;
1548 n_spilled += 1;
1551 /* Now come all special registers that might get saved in other
1552 general registers. */
1554 if (frame_pointer_needed)
1556 current_frame_info.reg_fp = find_gr_spill (1);
1557 /* If we did not get a register, then we take LOC79. This is guaranteed
1558 to be free, even if regs_ever_live is already set, because this is
1559 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
1560 as we don't count loc79 above. */
1561 if (current_frame_info.reg_fp == 0)
1563 current_frame_info.reg_fp = LOC_REG (79);
1564 current_frame_info.n_local_regs++;
1568 if (! current_function_is_leaf)
1570 /* Emit a save of BR0 if we call other functions. Do this even
1571 if this function doesn't return, as EH depends on this to be
1572 able to unwind the stack. */
1573 SET_HARD_REG_BIT (mask, BR_REG (0));
1575 current_frame_info.reg_save_b0 = find_gr_spill (1);
1576 if (current_frame_info.reg_save_b0 == 0)
1578 spill_size += 8;
1579 n_spilled += 1;
1582 /* Similarly for ar.pfs. */
1583 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1584 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1585 if (current_frame_info.reg_save_ar_pfs == 0)
1587 extra_spill_size += 8;
1588 n_spilled += 1;
1591 /* Similarly for gp. Note that if we're calling setjmp, the stacked
1592 registers are clobbered, so we fall back to the stack. */
1593 current_frame_info.reg_save_gp
1594 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
1595 if (current_frame_info.reg_save_gp == 0)
1597 SET_HARD_REG_BIT (mask, GR_REG (1));
1598 spill_size += 8;
1599 n_spilled += 1;
1602 else
1604 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
1606 SET_HARD_REG_BIT (mask, BR_REG (0));
1607 spill_size += 8;
1608 n_spilled += 1;
1611 if (regs_ever_live[AR_PFS_REGNUM])
1613 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1614 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1615 if (current_frame_info.reg_save_ar_pfs == 0)
1617 extra_spill_size += 8;
1618 n_spilled += 1;
1623 /* Unwind descriptor hackery: things are most efficient if we allocate
1624 consecutive GR save registers for RP, PFS, FP in that order. However,
1625 it is absolutely critical that FP get the only hard register that's
1626 guaranteed to be free, so we allocated it first. If all three did
1627 happen to be allocated hard regs, and are consecutive, rearrange them
1628 into the preferred order now. */
1629 if (current_frame_info.reg_fp != 0
1630 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
1631 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
1633 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
1634 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
1635 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
1638 /* See if we need to store the predicate register block. */
1639 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
1640 if (regs_ever_live[regno] && ! call_used_regs[regno])
1641 break;
1642 if (regno <= PR_REG (63))
1644 SET_HARD_REG_BIT (mask, PR_REG (0));
1645 current_frame_info.reg_save_pr = find_gr_spill (1);
1646 if (current_frame_info.reg_save_pr == 0)
1648 extra_spill_size += 8;
1649 n_spilled += 1;
1652 /* ??? Mark them all as used so that register renaming and such
1653 are free to use them. */
1654 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
1655 regs_ever_live[regno] = 1;
1658 /* If we're forced to use st8.spill, we're forced to save and restore
1659 ar.unat as well. The check for existing liveness allows inline asm
1660 to touch ar.unat. */
1661 if (spilled_gr_p || cfun->machine->n_varargs
1662 || regs_ever_live[AR_UNAT_REGNUM])
1664 regs_ever_live[AR_UNAT_REGNUM] = 1;
1665 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
1666 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
1667 if (current_frame_info.reg_save_ar_unat == 0)
1669 extra_spill_size += 8;
1670 n_spilled += 1;
1674 if (regs_ever_live[AR_LC_REGNUM])
1676 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
1677 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
1678 if (current_frame_info.reg_save_ar_lc == 0)
1680 extra_spill_size += 8;
1681 n_spilled += 1;
1685 /* If we have an odd number of words of pretend arguments written to
1686 the stack, then the FR save area will be unaligned. We round the
1687 size of this area up to keep things 16 byte aligned. */
1688 if (spilled_fr_p)
1689 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
1690 else
1691 pretend_args_size = current_function_pretend_args_size;
1693 total_size = (spill_size + extra_spill_size + size + pretend_args_size
1694 + current_function_outgoing_args_size);
1695 total_size = IA64_STACK_ALIGN (total_size);
1697 /* We always use the 16-byte scratch area provided by the caller, but
1698 if we are a leaf function, there's no one to which we need to provide
1699 a scratch area. */
1700 if (current_function_is_leaf)
1701 total_size = MAX (0, total_size - 16);
1703 current_frame_info.total_size = total_size;
1704 current_frame_info.spill_cfa_off = pretend_args_size - 16;
1705 current_frame_info.spill_size = spill_size;
1706 current_frame_info.extra_spill_size = extra_spill_size;
1707 COPY_HARD_REG_SET (current_frame_info.mask, mask);
1708 current_frame_info.n_spilled = n_spilled;
1709 current_frame_info.initialized = reload_completed;
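/* Worked example (hypothetical numbers): with 40 bytes of locals, 24
   bytes of gr/br/fr spills, 8 bytes of extra spills, no pretend args and
   48 bytes of outgoing arguments, the raw total is 120 bytes, which
   IA64_STACK_ALIGN rounds up to 128; a leaf function then drops the
   16-byte scratch area and ends up with a 112-byte frame.  */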
1712 /* Compute the initial difference between the specified pair of registers. */
1714 HOST_WIDE_INT
1715 ia64_initial_elimination_offset (int from, int to)
1717 HOST_WIDE_INT offset;
1719 ia64_compute_frame_size (get_frame_size ());
1720 switch (from)
1722 case FRAME_POINTER_REGNUM:
1723 if (to == HARD_FRAME_POINTER_REGNUM)
1725 if (current_function_is_leaf)
1726 offset = -current_frame_info.total_size;
1727 else
1728 offset = -(current_frame_info.total_size
1729 - current_function_outgoing_args_size - 16);
1731 else if (to == STACK_POINTER_REGNUM)
1733 if (current_function_is_leaf)
1734 offset = 0;
1735 else
1736 offset = 16 + current_function_outgoing_args_size;
1738 else
1739 abort ();
1740 break;
1742 case ARG_POINTER_REGNUM:
1743       /* Arguments start above the 16 byte save area, unless stdarg,
1744 in which case we store through the 16 byte save area. */
1745 if (to == HARD_FRAME_POINTER_REGNUM)
1746 offset = 16 - current_function_pretend_args_size;
1747 else if (to == STACK_POINTER_REGNUM)
1748 offset = (current_frame_info.total_size
1749 + 16 - current_function_pretend_args_size);
1750 else
1751 abort ();
1752 break;
1754 default:
1755 abort ();
1758 return offset;
1761 /* If there are more than a trivial number of register spills, we use
1762 two interleaved iterators so that we can get two memory references
1763 per insn group.
1765 In order to simplify things in the prologue and epilogue expanders,
1766 we use helper functions to fix up the memory references after the
1767 fact with the appropriate offsets to a POST_MODIFY memory mode.
1768 The following data structure tracks the state of the two iterators
1769 while insns are being emitted. */
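/* For illustration (hypothetical offsets): with four 8-byte spills at CFA
   offsets 0, 8, 16 and 24, two iterators are used and consecutive saves
   alternate between them, so iterator 0 covers offsets 0 and 16 while
   iterator 1 covers 8 and 24, giving two independent memory references
   per insn group as described above.  */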
1771 struct spill_fill_data
1773 rtx init_after; /* point at which to emit initializations */
1774 rtx init_reg[2]; /* initial base register */
1775 rtx iter_reg[2]; /* the iterator registers */
1776 rtx *prev_addr[2]; /* address of last memory use */
1777 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
1778 HOST_WIDE_INT prev_off[2]; /* last offset */
1779 int n_iter; /* number of iterators in use */
1780 int next_iter; /* next iterator to use */
1781 unsigned int save_gr_used_mask;
1784 static struct spill_fill_data spill_fill_data;
1786 static void
1787 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
1789 int i;
1791 spill_fill_data.init_after = get_last_insn ();
1792 spill_fill_data.init_reg[0] = init_reg;
1793 spill_fill_data.init_reg[1] = init_reg;
1794 spill_fill_data.prev_addr[0] = NULL;
1795 spill_fill_data.prev_addr[1] = NULL;
1796 spill_fill_data.prev_insn[0] = NULL;
1797 spill_fill_data.prev_insn[1] = NULL;
1798 spill_fill_data.prev_off[0] = cfa_off;
1799 spill_fill_data.prev_off[1] = cfa_off;
1800 spill_fill_data.next_iter = 0;
1801 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
1803 spill_fill_data.n_iter = 1 + (n_spills > 2);
1804 for (i = 0; i < spill_fill_data.n_iter; ++i)
1806 int regno = next_scratch_gr_reg ();
1807 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
1808 current_frame_info.gr_used_mask |= 1 << regno;
1812 static void
1813 finish_spill_pointers (void)
1815 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
1818 static rtx
1819 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
1821 int iter = spill_fill_data.next_iter;
1822 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
1823 rtx disp_rtx = GEN_INT (disp);
1824 rtx mem;
1826 if (spill_fill_data.prev_addr[iter])
1828 if (CONST_OK_FOR_N (disp))
1830 *spill_fill_data.prev_addr[iter]
1831 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
1832 gen_rtx_PLUS (DImode,
1833 spill_fill_data.iter_reg[iter],
1834 disp_rtx));
1835 REG_NOTES (spill_fill_data.prev_insn[iter])
1836 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
1837 REG_NOTES (spill_fill_data.prev_insn[iter]));
1839 else
1841 /* ??? Could use register post_modify for loads. */
1842 if (! CONST_OK_FOR_I (disp))
1844 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
1845 emit_move_insn (tmp, disp_rtx);
1846 disp_rtx = tmp;
1848 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
1849 spill_fill_data.iter_reg[iter], disp_rtx));
1852 /* Micro-optimization: if we've created a frame pointer, it's at
1853 CFA 0, which may allow the real iterator to be initialized lower,
1854 slightly increasing parallelism. Also, if there are few saves
1855 it may eliminate the iterator entirely. */
1856 else if (disp == 0
1857 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
1858 && frame_pointer_needed)
1860 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
1861 set_mem_alias_set (mem, get_varargs_alias_set ());
1862 return mem;
1864 else
1866 rtx seq, insn;
1868 if (disp == 0)
1869 seq = gen_movdi (spill_fill_data.iter_reg[iter],
1870 spill_fill_data.init_reg[iter]);
1871 else
1873 start_sequence ();
1875 if (! CONST_OK_FOR_I (disp))
1877 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
1878 emit_move_insn (tmp, disp_rtx);
1879 disp_rtx = tmp;
1882 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
1883 spill_fill_data.init_reg[iter],
1884 disp_rtx));
1886 seq = get_insns ();
1887 end_sequence ();
1890 /* Careful for being the first insn in a sequence. */
1891 if (spill_fill_data.init_after)
1892 insn = emit_insn_after (seq, spill_fill_data.init_after);
1893 else
1895 rtx first = get_insns ();
1896 if (first)
1897 insn = emit_insn_before (seq, first);
1898 else
1899 insn = emit_insn (seq);
1901 spill_fill_data.init_after = insn;
1903 /* If DISP is 0, we may or may not have a further adjustment
1904 afterward. If we do, then the load/store insn may be modified
1905 to be a post-modify. If we don't, then this copy may be
1906 eliminated by copyprop_hardreg_forward, which makes this
1907 insn garbage, which runs afoul of the sanity check in
1908 propagate_one_insn. So mark this insn as legal to delete. */
1909 if (disp == 0)
1910 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
1911 REG_NOTES (insn));
1914 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
1916 /* ??? Not all of the spills are for varargs, but some of them are.
1917 The rest of the spills belong in an alias set of their own. But
1918 it doesn't actually hurt to include them here. */
1919 set_mem_alias_set (mem, get_varargs_alias_set ());
1921 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
1922 spill_fill_data.prev_off[iter] = cfa_off;
1924 if (++iter >= spill_fill_data.n_iter)
1925 iter = 0;
1926 spill_fill_data.next_iter = iter;
1928 return mem;
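/* Rough illustration of the fixup performed above (register numbers,
   offsets and mnemonics are hypothetical, not generated output): the
   first reference through an iterator is emitted as an explicit
       add  rI = disp, base
       st8  [rI] = rX
   and when the next slot is requested through the same iterator, the
   previous MEM address is rewritten into a POST_MODIFY, so the earlier
   store becomes
       st8  [rI] = rX, disp'
   advancing the iterator to the next slot as a side effect and freeing
   the new reference to issue in the same insn group as other work.  */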
1931 static void
1932 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
1933 rtx frame_reg)
1935 int iter = spill_fill_data.next_iter;
1936 rtx mem, insn;
1938 mem = spill_restore_mem (reg, cfa_off);
1939 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
1940 spill_fill_data.prev_insn[iter] = insn;
1942 if (frame_reg)
1944 rtx base;
1945 HOST_WIDE_INT off;
1947 RTX_FRAME_RELATED_P (insn) = 1;
1949 /* Don't even pretend that the unwind code can intuit its way
1950 through a pair of interleaved post_modify iterators. Just
1951 provide the correct answer. */
1953 if (frame_pointer_needed)
1955 base = hard_frame_pointer_rtx;
1956 off = - cfa_off;
1958 else
1960 base = stack_pointer_rtx;
1961 off = current_frame_info.total_size - cfa_off;
1964 REG_NOTES (insn)
1965 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
1966 gen_rtx_SET (VOIDmode,
1967 gen_rtx_MEM (GET_MODE (reg),
1968 plus_constant (base, off)),
1969 frame_reg),
1970 REG_NOTES (insn));
1974 static void
1975 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
1977 int iter = spill_fill_data.next_iter;
1978 rtx insn;
1980 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
1981 GEN_INT (cfa_off)));
1982 spill_fill_data.prev_insn[iter] = insn;
1985 /* Wrapper functions that discard the CONST_INT spill offset.  These
1986 exist so that we can give gr_spill/gr_fill the offset they need and
1987 use a consistent function interface. */
1989 static rtx
1990 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1992 return gen_movdi (dest, src);
1995 static rtx
1996 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1998 return gen_fr_spill (dest, src);
2001 static rtx
2002 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2004 return gen_fr_restore (dest, src);
2007 /* Called after register allocation to add any instructions needed for the
2008 prologue. Using a prologue insn is favored compared to putting all of the
2009 instructions in output_function_prologue(), since it allows the scheduler
2010 to intermix instructions with the saves of the caller saved registers. In
2011 some cases, it might be necessary to emit a barrier instruction as the last
2012 insn to prevent such scheduling.
2014 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2015 so that the debug info generation code can handle them properly.
2017 The register save area is laid out like so:
2018 cfa+16
2019 [ varargs spill area ]
2020 [ fr register spill area ]
2021 [ br register spill area ]
2022 [ ar register spill area ]
2023 [ pr register spill area ]
2024 [ gr register spill area ] */
2026 /* ??? Get inefficient code when the frame size is larger than can fit in an
2027 adds instruction. */
2029 void
2030 ia64_expand_prologue (void)
2032 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2033 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2034 rtx reg, alt_reg;
2036 ia64_compute_frame_size (get_frame_size ());
2037 last_scratch_gr_reg = 15;
2039 /* If there is no epilogue, then we don't need some prologue insns.
2040 We need to avoid emitting the dead prologue insns, because flow
2041 will complain about them. */
2042 if (optimize)
2044 edge e;
2045 edge_iterator ei;
2047 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2048 if ((e->flags & EDGE_FAKE) == 0
2049 && (e->flags & EDGE_FALLTHRU) != 0)
2050 break;
2051 epilogue_p = (e != NULL);
2053 else
2054 epilogue_p = 1;
2056 /* Set the local, input, and output register names. We need to do this
2057 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2058 half. If we use in/loc/out register names, then we get assembler errors
2059 in crtn.S because there is no alloc insn or regstk directive in there. */
2060 if (! TARGET_REG_NAMES)
2062 int inputs = current_frame_info.n_input_regs;
2063 int locals = current_frame_info.n_local_regs;
2064 int outputs = current_frame_info.n_output_regs;
2066 for (i = 0; i < inputs; i++)
2067 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2068 for (i = 0; i < locals; i++)
2069 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2070 for (i = 0; i < outputs; i++)
2071 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2074 /* Set the frame pointer register name. The regnum is logically loc79,
2075 but of course we'll not have allocated that many locals. Rather than
2076 worrying about renumbering the existing rtxs, we adjust the name. */
2077 /* ??? This code means that we can never use one local register when
2078 there is a frame pointer. loc79 gets wasted in this case, as it is
2079 renamed to a register that will never be used. See also the try_locals
2080 code in find_gr_spill. */
2081 if (current_frame_info.reg_fp)
2083 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2084 reg_names[HARD_FRAME_POINTER_REGNUM]
2085 = reg_names[current_frame_info.reg_fp];
2086 reg_names[current_frame_info.reg_fp] = tmp;
2089 /* We don't need an alloc instruction if we've used no outputs or locals. */
2090 if (current_frame_info.n_local_regs == 0
2091 && current_frame_info.n_output_regs == 0
2092 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2093 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2095 /* If there is no alloc, but there are input registers used, then we
2096 need a .regstk directive. */
2097 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2098 ar_pfs_save_reg = NULL_RTX;
2100 else
2102 current_frame_info.need_regstk = 0;
2104 if (current_frame_info.reg_save_ar_pfs)
2105 regno = current_frame_info.reg_save_ar_pfs;
2106 else
2107 regno = next_scratch_gr_reg ();
2108 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2110 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2111 GEN_INT (current_frame_info.n_input_regs),
2112 GEN_INT (current_frame_info.n_local_regs),
2113 GEN_INT (current_frame_info.n_output_regs),
2114 GEN_INT (current_frame_info.n_rotate_regs)));
2115 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2118 /* Set up frame pointer, stack pointer, and spill iterators. */
2120 n_varargs = cfun->machine->n_varargs;
2121 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2122 stack_pointer_rtx, 0);
2124 if (frame_pointer_needed)
2126 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2127 RTX_FRAME_RELATED_P (insn) = 1;
2130 if (current_frame_info.total_size != 0)
2132 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2133 rtx offset;
2135 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2136 offset = frame_size_rtx;
2137 else
2139 regno = next_scratch_gr_reg ();
2140 offset = gen_rtx_REG (DImode, regno);
2141 emit_move_insn (offset, frame_size_rtx);
2144 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2145 stack_pointer_rtx, offset));
2147 if (! frame_pointer_needed)
2149 RTX_FRAME_RELATED_P (insn) = 1;
2150 if (GET_CODE (offset) != CONST_INT)
2152 REG_NOTES (insn)
2153 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2154 gen_rtx_SET (VOIDmode,
2155 stack_pointer_rtx,
2156 gen_rtx_PLUS (DImode,
2157 stack_pointer_rtx,
2158 frame_size_rtx)),
2159 REG_NOTES (insn));
2163 /* ??? At this point we must generate a magic insn that appears to
2164 modify the stack pointer, the frame pointer, and all spill
2165 iterators. This would allow the most scheduling freedom. For
2166 now, just hard stop. */
2167 emit_insn (gen_blockage ());
2170 /* Must copy out ar.unat before doing any integer spills. */
2171 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2173 if (current_frame_info.reg_save_ar_unat)
2174 ar_unat_save_reg
2175 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2176 else
2178 alt_regno = next_scratch_gr_reg ();
2179 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2180 current_frame_info.gr_used_mask |= 1 << alt_regno;
2183 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2184 insn = emit_move_insn (ar_unat_save_reg, reg);
2185 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2187 /* Even if we're not going to generate an epilogue, we still
2188 need to save the register so that EH works. */
2189 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2190 emit_insn (gen_prologue_use (ar_unat_save_reg));
2192 else
2193 ar_unat_save_reg = NULL_RTX;
2195 /* Spill all varargs registers. Do this before spilling any GR registers,
2196 since we want the UNAT bits for the GR registers to override the UNAT
2197 bits from varargs, which we don't care about. */
2199 cfa_off = -16;
2200 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2202 reg = gen_rtx_REG (DImode, regno);
2203 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2206 /* Locate the bottom of the register save area. */
2207 cfa_off = (current_frame_info.spill_cfa_off
2208 + current_frame_info.spill_size
2209 + current_frame_info.extra_spill_size);
2211 /* Save the predicate register block either in a register or in memory. */
2212 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2214 reg = gen_rtx_REG (DImode, PR_REG (0));
2215 if (current_frame_info.reg_save_pr != 0)
2217 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2218 insn = emit_move_insn (alt_reg, reg);
2220 /* ??? Denote pr spill/fill by a DImode move that modifies all
2221 64 hard registers. */
2222 RTX_FRAME_RELATED_P (insn) = 1;
2223 REG_NOTES (insn)
2224 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2225 gen_rtx_SET (VOIDmode, alt_reg, reg),
2226 REG_NOTES (insn));
2228 /* Even if we're not going to generate an epilogue, we still
2229 need to save the register so that EH works. */
2230 if (! epilogue_p)
2231 emit_insn (gen_prologue_use (alt_reg));
2233 else
2235 alt_regno = next_scratch_gr_reg ();
2236 alt_reg = gen_rtx_REG (DImode, alt_regno);
2237 insn = emit_move_insn (alt_reg, reg);
2238 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2239 cfa_off -= 8;
2243 /* Handle AR regs in numerical order. All of them get special handling. */
2244 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2245 && current_frame_info.reg_save_ar_unat == 0)
2247 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2248 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2249 cfa_off -= 8;
2252 /* The alloc insn already copied ar.pfs into a general register. The
2253 only thing we have to do now is copy that register to a stack slot
2254 if we'd not allocated a local register for the job. */
2255 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2256 && current_frame_info.reg_save_ar_pfs == 0)
2258 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2259 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2260 cfa_off -= 8;
2263 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2265 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2266 if (current_frame_info.reg_save_ar_lc != 0)
2268 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2269 insn = emit_move_insn (alt_reg, reg);
2270 RTX_FRAME_RELATED_P (insn) = 1;
2272 /* Even if we're not going to generate an epilogue, we still
2273 need to save the register so that EH works. */
2274 if (! epilogue_p)
2275 emit_insn (gen_prologue_use (alt_reg));
2277 else
2279 alt_regno = next_scratch_gr_reg ();
2280 alt_reg = gen_rtx_REG (DImode, alt_regno);
2281 emit_move_insn (alt_reg, reg);
2282 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2283 cfa_off -= 8;
2287 if (current_frame_info.reg_save_gp)
2289 insn = emit_move_insn (gen_rtx_REG (DImode,
2290 current_frame_info.reg_save_gp),
2291 pic_offset_table_rtx);
2292 /* We don't know for sure yet if this is actually needed, since
2293 we've not split the PIC call patterns. If all of the calls
2294 are indirect, and not followed by any uses of the gp, then
2295 this save is dead. Allow it to go away. */
2296 REG_NOTES (insn)
2297 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2300 /* We should now be at the base of the gr/br/fr spill area. */
2301 if (cfa_off != (current_frame_info.spill_cfa_off
2302 + current_frame_info.spill_size))
2303 abort ();
2305 /* Spill all general registers. */
2306 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2307 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2309 reg = gen_rtx_REG (DImode, regno);
2310 do_spill (gen_gr_spill, reg, cfa_off, reg);
2311 cfa_off -= 8;
2314 /* Handle BR0 specially -- it may be getting stored permanently in
2315 some GR register. */
2316 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2318 reg = gen_rtx_REG (DImode, BR_REG (0));
2319 if (current_frame_info.reg_save_b0 != 0)
2321 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2322 insn = emit_move_insn (alt_reg, reg);
2323 RTX_FRAME_RELATED_P (insn) = 1;
2325 /* Even if we're not going to generate an epilogue, we still
2326 need to save the register so that EH works. */
2327 if (! epilogue_p)
2328 emit_insn (gen_prologue_use (alt_reg));
2330 else
2332 alt_regno = next_scratch_gr_reg ();
2333 alt_reg = gen_rtx_REG (DImode, alt_regno);
2334 emit_move_insn (alt_reg, reg);
2335 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2336 cfa_off -= 8;
2340 /* Spill the rest of the BR registers. */
2341 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2342 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2344 alt_regno = next_scratch_gr_reg ();
2345 alt_reg = gen_rtx_REG (DImode, alt_regno);
2346 reg = gen_rtx_REG (DImode, regno);
2347 emit_move_insn (alt_reg, reg);
2348 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2349 cfa_off -= 8;
2352 /* Align the frame and spill all FR registers. */
2353 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2354 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2356 if (cfa_off & 15)
2357 abort ();
2358 reg = gen_rtx_REG (XFmode, regno);
2359 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2360 cfa_off -= 16;
2363 if (cfa_off != current_frame_info.spill_cfa_off)
2364 abort ();
2366 finish_spill_pointers ();
2369 /* Called after register allocation to add any instructions needed for the
2370 epilogue. Using an epilogue insn is favored compared to putting all of the
2371 instructions in output_function_epilogue(), since it allows the scheduler
2372 to intermix instructions with the restores of the caller saved registers.  In
2373 some cases, it might be necessary to emit a barrier instruction as the last
2374 insn to prevent such scheduling. */
2376 void
2377 ia64_expand_epilogue (int sibcall_p)
2379 rtx insn, reg, alt_reg, ar_unat_save_reg;
2380 int regno, alt_regno, cfa_off;
2382 ia64_compute_frame_size (get_frame_size ());
2384 /* If there is a frame pointer, then we use it instead of the stack
2385 pointer, so that the stack pointer does not need to be valid when
2386 the epilogue starts. See EXIT_IGNORE_STACK. */
2387 if (frame_pointer_needed)
2388 setup_spill_pointers (current_frame_info.n_spilled,
2389 hard_frame_pointer_rtx, 0);
2390 else
2391 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2392 current_frame_info.total_size);
2394 if (current_frame_info.total_size != 0)
2396 /* ??? At this point we must generate a magic insn that appears to
2397 modify the spill iterators and the frame pointer. This would
2398 allow the most scheduling freedom. For now, just hard stop. */
2399 emit_insn (gen_blockage ());
2402 /* Locate the bottom of the register save area. */
2403 cfa_off = (current_frame_info.spill_cfa_off
2404 + current_frame_info.spill_size
2405 + current_frame_info.extra_spill_size);
2407 /* Restore the predicate registers. */
2408 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2410 if (current_frame_info.reg_save_pr != 0)
2411 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2412 else
2414 alt_regno = next_scratch_gr_reg ();
2415 alt_reg = gen_rtx_REG (DImode, alt_regno);
2416 do_restore (gen_movdi_x, alt_reg, cfa_off);
2417 cfa_off -= 8;
2419 reg = gen_rtx_REG (DImode, PR_REG (0));
2420 emit_move_insn (reg, alt_reg);
2423 /* Restore the application registers. */
2425 /* Load the saved unat from the stack, but do not restore it until
2426 after the GRs have been restored. */
2427 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2429 if (current_frame_info.reg_save_ar_unat != 0)
2430 ar_unat_save_reg
2431 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2432 else
2434 alt_regno = next_scratch_gr_reg ();
2435 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2436 current_frame_info.gr_used_mask |= 1 << alt_regno;
2437 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2438 cfa_off -= 8;
2441 else
2442 ar_unat_save_reg = NULL_RTX;
2444 if (current_frame_info.reg_save_ar_pfs != 0)
2446 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2447 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2448 emit_move_insn (reg, alt_reg);
2450 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2452 alt_regno = next_scratch_gr_reg ();
2453 alt_reg = gen_rtx_REG (DImode, alt_regno);
2454 do_restore (gen_movdi_x, alt_reg, cfa_off);
2455 cfa_off -= 8;
2456 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2457 emit_move_insn (reg, alt_reg);
2460 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2462 if (current_frame_info.reg_save_ar_lc != 0)
2463 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2464 else
2466 alt_regno = next_scratch_gr_reg ();
2467 alt_reg = gen_rtx_REG (DImode, alt_regno);
2468 do_restore (gen_movdi_x, alt_reg, cfa_off);
2469 cfa_off -= 8;
2471 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2472 emit_move_insn (reg, alt_reg);
2475 /* We should now be at the base of the gr/br/fr spill area. */
2476 if (cfa_off != (current_frame_info.spill_cfa_off
2477 + current_frame_info.spill_size))
2478 abort ();
2480 /* The GP may be stored on the stack in the prologue, but it's
2481 never restored in the epilogue. Skip the stack slot. */
2482 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2483 cfa_off -= 8;
2485 /* Restore all general registers. */
2486 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2487 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2489 reg = gen_rtx_REG (DImode, regno);
2490 do_restore (gen_gr_restore, reg, cfa_off);
2491 cfa_off -= 8;
2494 /* Restore the branch registers. Handle B0 specially, as it may
2495 have gotten stored in some GR register. */
2496 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2498 if (current_frame_info.reg_save_b0 != 0)
2499 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2500 else
2502 alt_regno = next_scratch_gr_reg ();
2503 alt_reg = gen_rtx_REG (DImode, alt_regno);
2504 do_restore (gen_movdi_x, alt_reg, cfa_off);
2505 cfa_off -= 8;
2507 reg = gen_rtx_REG (DImode, BR_REG (0));
2508 emit_move_insn (reg, alt_reg);
2511 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2512 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2514 alt_regno = next_scratch_gr_reg ();
2515 alt_reg = gen_rtx_REG (DImode, alt_regno);
2516 do_restore (gen_movdi_x, alt_reg, cfa_off);
2517 cfa_off -= 8;
2518 reg = gen_rtx_REG (DImode, regno);
2519 emit_move_insn (reg, alt_reg);
2522 /* Restore floating point registers. */
2523 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2524 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2526 if (cfa_off & 15)
2527 abort ();
2528 reg = gen_rtx_REG (XFmode, regno);
2529 do_restore (gen_fr_restore_x, reg, cfa_off);
2530 cfa_off -= 16;
2533 /* Restore ar.unat for real. */
2534 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2536 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2537 emit_move_insn (reg, ar_unat_save_reg);
2540 if (cfa_off != current_frame_info.spill_cfa_off)
2541 abort ();
2543 finish_spill_pointers ();
2545 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2547 /* ??? At this point we must generate a magic insn that appears to
2548 modify the spill iterators, the stack pointer, and the frame
2549 pointer. This would allow the most scheduling freedom. For now,
2550 just hard stop. */
2551 emit_insn (gen_blockage ());
2554 if (cfun->machine->ia64_eh_epilogue_sp)
2555 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2556 else if (frame_pointer_needed)
2558 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2559 RTX_FRAME_RELATED_P (insn) = 1;
2561 else if (current_frame_info.total_size)
2563 rtx offset, frame_size_rtx;
2565 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2566 if (CONST_OK_FOR_I (current_frame_info.total_size))
2567 offset = frame_size_rtx;
2568 else
2570 regno = next_scratch_gr_reg ();
2571 offset = gen_rtx_REG (DImode, regno);
2572 emit_move_insn (offset, frame_size_rtx);
2575 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2576 offset));
2578 RTX_FRAME_RELATED_P (insn) = 1;
2579 if (GET_CODE (offset) != CONST_INT)
2581 REG_NOTES (insn)
2582 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2583 gen_rtx_SET (VOIDmode,
2584 stack_pointer_rtx,
2585 gen_rtx_PLUS (DImode,
2586 stack_pointer_rtx,
2587 frame_size_rtx)),
2588 REG_NOTES (insn));
2592 if (cfun->machine->ia64_eh_epilogue_bsp)
2593 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2595 if (! sibcall_p)
2596 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2597 else
2599 int fp = GR_REG (2);
2600 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
2601 first available call clobbered register. If there was a frame_pointer
2602 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2603 so we have to make sure we're using the string "r2" when emitting
2604 the register name for the assembler. */
2605 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2606 fp = HARD_FRAME_POINTER_REGNUM;
2608 /* We must emit an alloc to force the input registers to become output
2609 registers. Otherwise, if the callee tries to pass its parameters
2610 through to another call without an intervening alloc, then these
2611 values get lost. */
2612 /* ??? We don't need to preserve all input registers. We only need to
2613 preserve those input registers used as arguments to the sibling call.
2614 It is unclear how to compute that number here. */
2615 if (current_frame_info.n_input_regs != 0)
2617 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
2618 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2619 const0_rtx, const0_rtx,
2620 n_inputs, const0_rtx));
2621 RTX_FRAME_RELATED_P (insn) = 1;
2626 /* Return 1 if br.ret can do all the work required to return from a
2627 function. */
2629 int
2630 ia64_direct_return (void)
2632 if (reload_completed && ! frame_pointer_needed)
2634 ia64_compute_frame_size (get_frame_size ());
2636 return (current_frame_info.total_size == 0
2637 && current_frame_info.n_spilled == 0
2638 && current_frame_info.reg_save_b0 == 0
2639 && current_frame_info.reg_save_pr == 0
2640 && current_frame_info.reg_save_ar_pfs == 0
2641 && current_frame_info.reg_save_ar_unat == 0
2642 && current_frame_info.reg_save_ar_lc == 0);
2644 return 0;
2647 /* Return the magic cookie that we use to hold the return address
2648 during early compilation. */
2650 rtx
2651 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
2653 if (count != 0)
2654 return NULL;
2655 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
2658 /* Split this value after reload, now that we know where the return
2659 address is saved. */
2661 void
2662 ia64_split_return_addr_rtx (rtx dest)
2664 rtx src;
2666 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2668 if (current_frame_info.reg_save_b0 != 0)
2669 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2670 else
2672 HOST_WIDE_INT off;
2673 unsigned int regno;
2675 /* Compute offset from CFA for BR0. */
2676 /* ??? Must be kept in sync with ia64_expand_prologue. */
2677 off = (current_frame_info.spill_cfa_off
2678 + current_frame_info.spill_size);
2679 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2680 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2681 off -= 8;
2683 /* Convert CFA offset to a register based offset. */
2684 if (frame_pointer_needed)
2685 src = hard_frame_pointer_rtx;
2686 else
2688 src = stack_pointer_rtx;
2689 off += current_frame_info.total_size;
2692 /* Load address into scratch register. */
2693 if (CONST_OK_FOR_I (off))
2694 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
2695 else
2697 emit_move_insn (dest, GEN_INT (off));
2698 emit_insn (gen_adddi3 (dest, src, dest));
2701 src = gen_rtx_MEM (Pmode, dest);
2704 else
2705 src = gen_rtx_REG (DImode, BR_REG (0));
2707 emit_move_insn (dest, src);
2710 int
2711 ia64_hard_regno_rename_ok (int from, int to)
2713 /* Don't clobber any of the registers we reserved for the prologue. */
2714 if (to == current_frame_info.reg_fp
2715 || to == current_frame_info.reg_save_b0
2716 || to == current_frame_info.reg_save_pr
2717 || to == current_frame_info.reg_save_ar_pfs
2718 || to == current_frame_info.reg_save_ar_unat
2719 || to == current_frame_info.reg_save_ar_lc)
2720 return 0;
2722 if (from == current_frame_info.reg_fp
2723 || from == current_frame_info.reg_save_b0
2724 || from == current_frame_info.reg_save_pr
2725 || from == current_frame_info.reg_save_ar_pfs
2726 || from == current_frame_info.reg_save_ar_unat
2727 || from == current_frame_info.reg_save_ar_lc)
2728 return 0;
2730 /* Don't use output registers outside the register frame. */
2731 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
2732 return 0;
2734 /* Retain even/oddness on predicate register pairs. */
2735 if (PR_REGNO_P (from) && PR_REGNO_P (to))
2736 return (from & 1) == (to & 1);
2738 return 1;
2741 /* Target hook for assembling integer objects. Handle word-sized
2742 aligned objects and detect the cases when @fptr is needed. */
2744 static bool
2745 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
2747 if (size == POINTER_SIZE / BITS_PER_UNIT
2748 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
2749 && GET_CODE (x) == SYMBOL_REF
2750 && SYMBOL_REF_FUNCTION_P (x))
2752 static const char * const directive[2][2] = {
2753 /* 64-bit pointer */ /* 32-bit pointer */
2754 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
2755 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
2757 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
2758 output_addr_const (asm_out_file, x);
2759 fputs (")\n", asm_out_file);
2760 return true;
2762 return default_assemble_integer (x, size, aligned_p);
2765 /* Emit the function prologue. */
2767 static void
2768 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2770 int mask, grsave, grsave_prev;
2772 if (current_frame_info.need_regstk)
2773 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
2774 current_frame_info.n_input_regs,
2775 current_frame_info.n_local_regs,
2776 current_frame_info.n_output_regs,
2777 current_frame_info.n_rotate_regs);
2779 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2780 return;
2782 /* Emit the .prologue directive. */
2784 mask = 0;
2785 grsave = grsave_prev = 0;
2786 if (current_frame_info.reg_save_b0 != 0)
2788 mask |= 8;
2789 grsave = grsave_prev = current_frame_info.reg_save_b0;
2791 if (current_frame_info.reg_save_ar_pfs != 0
2792 && (grsave_prev == 0
2793 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
2795 mask |= 4;
2796 if (grsave_prev == 0)
2797 grsave = current_frame_info.reg_save_ar_pfs;
2798 grsave_prev = current_frame_info.reg_save_ar_pfs;
2800 if (current_frame_info.reg_fp != 0
2801 && (grsave_prev == 0
2802 || current_frame_info.reg_fp == grsave_prev + 1))
2804 mask |= 2;
2805 if (grsave_prev == 0)
2806 grsave = HARD_FRAME_POINTER_REGNUM;
2807 grsave_prev = current_frame_info.reg_fp;
2809 if (current_frame_info.reg_save_pr != 0
2810 && (grsave_prev == 0
2811 || current_frame_info.reg_save_pr == grsave_prev + 1))
2813 mask |= 1;
2814 if (grsave_prev == 0)
2815 grsave = current_frame_info.reg_save_pr;
2818 if (mask && TARGET_GNU_AS)
2819 fprintf (file, "\t.prologue %d, %d\n", mask,
2820 ia64_dbx_register_number (grsave));
2821 else
2822 fputs ("\t.prologue\n", file);
2824 /* Emit a .spill directive, if necessary, to relocate the base of
2825 the register spill area. */
2826 if (current_frame_info.spill_cfa_off != -16)
2827 fprintf (file, "\t.spill %ld\n",
2828 (long) (current_frame_info.spill_cfa_off
2829 + current_frame_info.spill_size));
2832 /* Emit the .body directive at the scheduled end of the prologue. */
2834 static void
2835 ia64_output_function_end_prologue (FILE *file)
2837 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2838 return;
2840 fputs ("\t.body\n", file);
2843 /* Emit the function epilogue. */
2845 static void
2846 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
2847 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2849 int i;
2851 if (current_frame_info.reg_fp)
2853 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2854 reg_names[HARD_FRAME_POINTER_REGNUM]
2855 = reg_names[current_frame_info.reg_fp];
2856 reg_names[current_frame_info.reg_fp] = tmp;
2858 if (! TARGET_REG_NAMES)
2860 for (i = 0; i < current_frame_info.n_input_regs; i++)
2861 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
2862 for (i = 0; i < current_frame_info.n_local_regs; i++)
2863 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
2864 for (i = 0; i < current_frame_info.n_output_regs; i++)
2865 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
2868 current_frame_info.initialized = 0;
2871 int
2872 ia64_dbx_register_number (int regno)
2874 /* In ia64_expand_prologue we quite literally renamed the frame pointer
2875 from its home at loc79 to something inside the register frame. We
2876 must perform the same renumbering here for the debug info. */
2877 if (current_frame_info.reg_fp)
2879 if (regno == HARD_FRAME_POINTER_REGNUM)
2880 regno = current_frame_info.reg_fp;
2881 else if (regno == current_frame_info.reg_fp)
2882 regno = HARD_FRAME_POINTER_REGNUM;
2885 if (IN_REGNO_P (regno))
2886 return 32 + regno - IN_REG (0);
2887 else if (LOC_REGNO_P (regno))
2888 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
2889 else if (OUT_REGNO_P (regno))
2890 return (32 + current_frame_info.n_input_regs
2891 + current_frame_info.n_local_regs + regno - OUT_REG (0));
2892 else
2893 return regno;
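/* For example (assuming a frame with two input and three local registers):
   in0 and in1 map to debug register numbers 32 and 33, loc0 maps to 34,
   and the first output register to 37; registers outside the stacked
   frame are returned unchanged.  */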
2896 void
2897 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
2899 rtx addr_reg, eight = GEN_INT (8);
2901 /* The Intel assembler requires that the global __ia64_trampoline symbol
2902 be declared explicitly. */
2903 if (!TARGET_GNU_AS)
2905 static bool declared_ia64_trampoline = false;
2907 if (!declared_ia64_trampoline)
2909 declared_ia64_trampoline = true;
2910 (*targetm.asm_out.globalize_label) (asm_out_file,
2911 "__ia64_trampoline");
2915 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
2916 addr = convert_memory_address (Pmode, addr);
2917 fnaddr = convert_memory_address (Pmode, fnaddr);
2918 static_chain = convert_memory_address (Pmode, static_chain);
2920 /* Load up our iterator. */
2921 addr_reg = gen_reg_rtx (Pmode);
2922 emit_move_insn (addr_reg, addr);
2924 /* The first two words are the fake descriptor:
2925 __ia64_trampoline, ADDR+16. */
2926 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2927 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
2928 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2930 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2931 copy_to_reg (plus_constant (addr, 16)));
2932 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2934 /* The third word is the target descriptor. */
2935 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
2936 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2938 /* The fourth word is the static chain. */
2939 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
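/* The resulting 32-byte block therefore looks like (offsets from ADDR):
      0: address of __ia64_trampoline       8: ADDR + 16
     16: the target descriptor (FNADDR)    24: the static chain
   i.e. the first two words form a fake function descriptor whose code
   address points at the shared trampoline stub.  */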
2942 /* Do any needed setup for a variadic function. CUM has not been updated
2943 for the last named argument which has type TYPE and mode MODE.
2945 We generate the actual spill instructions during prologue generation. */
2947 static void
2948 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2949 tree type, int * pretend_size,
2950 int second_time ATTRIBUTE_UNUSED)
2952 CUMULATIVE_ARGS next_cum = *cum;
2954 /* Skip the current argument. */
2955 ia64_function_arg_advance (&next_cum, mode, type, 1);
2957 if (next_cum.words < MAX_ARGUMENT_SLOTS)
2959 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
2960 *pretend_size = n * UNITS_PER_WORD;
2961 cfun->machine->n_varargs = n;
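/* For instance (hypothetical prototype): for a variadic function whose
   named arguments occupy three of the eight argument slots, n is 5, so
   *pretend_size becomes 40 bytes and the prologue later spills the last
   five incoming argument registers into that area.  */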
2965 /* Check whether TYPE is a homogeneous floating point aggregate. If
2966 it is, return the mode of the floating point type that appears
2967 in all leaves.  If it is not, return VOIDmode.
2969 An aggregate is a homogeneous floating point aggregate if all
2970 fields/elements in it have the same floating point type (e.g.,
2971 SFmode).  128-bit quad-precision floats are excluded. */
2973 static enum machine_mode
2974 hfa_element_mode (tree type, int nested)
2976 enum machine_mode element_mode = VOIDmode;
2977 enum machine_mode mode;
2978 enum tree_code code = TREE_CODE (type);
2979 int know_element_mode = 0;
2980 tree t;
2982 switch (code)
2984 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
2985 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
2986 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
2987 case FILE_TYPE: case LANG_TYPE: case FUNCTION_TYPE:
2988 return VOIDmode;
2990 /* Fortran complex types are supposed to be HFAs, so we need to handle
2991 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
2992 types though. */
2993 case COMPLEX_TYPE:
2994 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
2995 && TYPE_MODE (type) != TCmode)
2996 return GET_MODE_INNER (TYPE_MODE (type));
2997 else
2998 return VOIDmode;
3000 case REAL_TYPE:
3001 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3002 mode if this is contained within an aggregate. */
3003 if (nested && TYPE_MODE (type) != TFmode)
3004 return TYPE_MODE (type);
3005 else
3006 return VOIDmode;
3008 case ARRAY_TYPE:
3009 return hfa_element_mode (TREE_TYPE (type), 1);
3011 case RECORD_TYPE:
3012 case UNION_TYPE:
3013 case QUAL_UNION_TYPE:
3014 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3016 if (TREE_CODE (t) != FIELD_DECL)
3017 continue;
3019 mode = hfa_element_mode (TREE_TYPE (t), 1);
3020 if (know_element_mode)
3022 if (mode != element_mode)
3023 return VOIDmode;
3025 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3026 return VOIDmode;
3027 else
3029 know_element_mode = 1;
3030 element_mode = mode;
3033 return element_mode;
3035 default:
3036 /* If we reach here, we probably have some front-end specific type
3037 that the backend doesn't know about. This can happen via the
3038 aggregate_value_p call in init_function_start. All we can do is
3039 ignore unknown tree types. */
3040 return VOIDmode;
3043 return VOIDmode;
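/* Examples of the classification above (the types are purely
   illustrative):
     struct { float x, y, z; }      -> SFmode   (a three-element HFA)
     struct { double re, im; }      -> DFmode
     _Complex float                 -> SFmode
     struct { float f; double d; }  -> VOIDmode (mixed element types)
     struct { long double q; }      -> VOIDmode (TFmode quads excluded)  */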
3046 /* Return the number of words required to hold a quantity of TYPE and MODE
3047 when passed as an argument. */
3048 static int
3049 ia64_function_arg_words (tree type, enum machine_mode mode)
3051 int words;
3053 if (mode == BLKmode)
3054 words = int_size_in_bytes (type);
3055 else
3056 words = GET_MODE_SIZE (mode);
3058 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
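/* E.g. (sizes are illustrative): a 20-byte BLKmode aggregate needs
   (20 + 7) / 8 == 3 argument words, while a 4-byte SImode value still
   occupies one full 8-byte slot.  */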
3061 /* Return the number of registers that should be skipped so the current
3062 argument (described by TYPE and WORDS) will be properly aligned.
3064 Integer and float arguments larger than 8 bytes start at the next
3065 even boundary. Aggregates larger than 8 bytes start at the next
3066 even boundary if the aggregate has 16 byte alignment. Note that
3067 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3068 but are still to be aligned in registers.
3070 ??? The ABI does not specify how to handle aggregates with
3071 alignment from 9 to 15 bytes, or greater than 16. We handle them
3072 all as if they had 16 byte alignment. Such aggregates can occur
3073 only if gcc extensions are used. */
3074 static int
3075 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3077 if ((cum->words & 1) == 0)
3078 return 0;
3080 if (type
3081 && TREE_CODE (type) != INTEGER_TYPE
3082 && TREE_CODE (type) != REAL_TYPE)
3083 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3084 else
3085 return words > 1;
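/* Concretely (hypothetical state): if three slots are already in use
   (cum->words == 3, an odd number), a TFmode argument or a 16-byte
   aligned aggregate is bumped to slot 4, so one register is skipped;
   with an even number of slots in use nothing is skipped.  */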
3088 /* Return rtx for register where argument is passed, or zero if it is passed
3089 on the stack. */
3090 /* ??? 128-bit quad-precision floats are always passed in general
3091 registers. */
3093 rtx
3094 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3095 int named, int incoming)
3097 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3098 int words = ia64_function_arg_words (type, mode);
3099 int offset = ia64_function_arg_offset (cum, type, words);
3100 enum machine_mode hfa_mode = VOIDmode;
3102 /* If all argument slots are used, then it must go on the stack. */
3103 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3104 return 0;
3106 /* Check for and handle homogeneous FP aggregates. */
3107 if (type)
3108 hfa_mode = hfa_element_mode (type, 0);
3110 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3111 and unprototyped hfas are passed specially. */
3112 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3114 rtx loc[16];
3115 int i = 0;
3116 int fp_regs = cum->fp_regs;
3117 int int_regs = cum->words + offset;
3118 int hfa_size = GET_MODE_SIZE (hfa_mode);
3119 int byte_size;
3120 int args_byte_size;
3122 /* If prototyped, pass it in FR regs then GR regs.
3123 If not prototyped, pass it in both FR and GR regs.
3125 If this is an SFmode aggregate, then it is possible to run out of
3126 FR regs while GR regs are still left. In that case, we pass the
3127 remaining part in the GR regs. */
3129 /* Fill the FP regs. We do this always. We stop if we reach the end
3130 of the argument, the last FP register, or the last argument slot. */
3132 byte_size = ((mode == BLKmode)
3133 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3134 args_byte_size = int_regs * UNITS_PER_WORD;
3135 offset = 0;
3136 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3137 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3139 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3140 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3141 + fp_regs)),
3142 GEN_INT (offset));
3143 offset += hfa_size;
3144 args_byte_size += hfa_size;
3145 fp_regs++;
3148 /* If no prototype, then the whole thing must go in GR regs. */
3149 if (! cum->prototype)
3150 offset = 0;
3151 /* If this is an SFmode aggregate, then we might have some left over
3152 that needs to go in GR regs. */
3153 else if (byte_size != offset)
3154 int_regs += offset / UNITS_PER_WORD;
3156 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3158 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3160 enum machine_mode gr_mode = DImode;
3161 unsigned int gr_size;
3163 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3164 then this goes in a GR reg left adjusted/little endian, right
3165 adjusted/big endian. */
3166 /* ??? Currently this is handled wrong, because 4-byte hunks are
3167 always right adjusted/little endian. */
3168 if (offset & 0x4)
3169 gr_mode = SImode;
3170 /* If we have an even 4 byte hunk because the aggregate is a
3171 multiple of 4 bytes in size, then this goes in a GR reg right
3172 adjusted/little endian. */
3173 else if (byte_size - offset == 4)
3174 gr_mode = SImode;
3176 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3177 gen_rtx_REG (gr_mode, (basereg
3178 + int_regs)),
3179 GEN_INT (offset));
3181 gr_size = GET_MODE_SIZE (gr_mode);
3182 offset += gr_size;
3183 if (gr_size == UNITS_PER_WORD
3184 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3185 int_regs++;
3186 else if (gr_size > UNITS_PER_WORD)
3187 int_regs += gr_size / UNITS_PER_WORD;
3189 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3192 /* Integral types and aggregates go in general registers.  If we have run out of
3193 FR registers, then FP values must also go in general registers.  This can
3194 happen when we have an SFmode HFA. */
3195 else if (mode == TFmode || mode == TCmode
3196 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3198 int byte_size = ((mode == BLKmode)
3199 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3200 if (BYTES_BIG_ENDIAN
3201 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3202 && byte_size < UNITS_PER_WORD
3203 && byte_size > 0)
3205 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3206 gen_rtx_REG (DImode,
3207 (basereg + cum->words
3208 + offset)),
3209 const0_rtx);
3210 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3212 else
3213 return gen_rtx_REG (mode, basereg + cum->words + offset);
3217 /* If there is a prototype, then FP values go in a FR register when
3218 named, and in a GR register when unnamed. */
3219 else if (cum->prototype)
3221 if (named)
3222 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3223 /* In big-endian mode, an anonymous SFmode value must be represented
3224 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3225 the value into the high half of the general register. */
3226 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3227 return gen_rtx_PARALLEL (mode,
3228 gen_rtvec (1,
3229 gen_rtx_EXPR_LIST (VOIDmode,
3230 gen_rtx_REG (DImode, basereg + cum->words + offset),
3231 const0_rtx)));
3232 else
3233 return gen_rtx_REG (mode, basereg + cum->words + offset);
3235 /* If there is no prototype, then FP values go in both FR and GR
3236 registers. */
3237 else
3239 /* See comment above. */
3240 enum machine_mode inner_mode =
3241 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3243 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3244 gen_rtx_REG (mode, (FR_ARG_FIRST
3245 + cum->fp_regs)),
3246 const0_rtx);
3247 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3248 gen_rtx_REG (inner_mode,
3249 (basereg + cum->words
3250 + offset)),
3251 const0_rtx);
3253 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3257 /* Return number of bytes, at the beginning of the argument, that must be
3258 put in registers.  0 if the argument is entirely in registers or entirely
3259 in memory. */
3261 static int
3262 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3263 tree type, bool named ATTRIBUTE_UNUSED)
3265 int words = ia64_function_arg_words (type, mode);
3266 int offset = ia64_function_arg_offset (cum, type, words);
3268 /* If all argument slots are used, then it must go on the stack. */
3269 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3270 return 0;
3272 /* It doesn't matter whether the argument goes in FR or GR regs. If
3273 it fits within the 8 argument slots, then it goes entirely in
3274 registers. If it extends past the last argument slot, then the rest
3275 goes on the stack. */
3277 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3278 return 0;
3280 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
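/* Worked example (hypothetical state): with six slots already used and a
   24-byte aggregate (three words, no alignment skip), 6 + 3 exceeds the
   eight available slots, so (8 - 6) * 8 == 16 bytes go in registers and
   the remaining 8 bytes go on the stack.  */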
3283 /* Update CUM to point after this argument. This is patterned after
3284 ia64_function_arg. */
3286 void
3287 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3288 tree type, int named)
3290 int words = ia64_function_arg_words (type, mode);
3291 int offset = ia64_function_arg_offset (cum, type, words);
3292 enum machine_mode hfa_mode = VOIDmode;
3294 /* If all arg slots are already full, then there is nothing to do. */
3295 if (cum->words >= MAX_ARGUMENT_SLOTS)
3296 return;
3298 cum->words += words + offset;
3300 /* Check for and handle homogeneous FP aggregates. */
3301 if (type)
3302 hfa_mode = hfa_element_mode (type, 0);
3304 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3305 and unprototyped hfas are passed specially. */
3306 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3308 int fp_regs = cum->fp_regs;
3309 /* This is the original value of cum->words + offset. */
3310 int int_regs = cum->words - words;
3311 int hfa_size = GET_MODE_SIZE (hfa_mode);
3312 int byte_size;
3313 int args_byte_size;
3315 /* If prototyped, pass it in FR regs then GR regs.
3316 If not prototyped, pass it in both FR and GR regs.
3318 If this is an SFmode aggregate, then it is possible to run out of
3319 FR regs while GR regs are still left. In that case, we pass the
3320 remaining part in the GR regs. */
3322 /* Fill the FP regs. We do this always. We stop if we reach the end
3323 of the argument, the last FP register, or the last argument slot. */
3325 byte_size = ((mode == BLKmode)
3326 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3327 args_byte_size = int_regs * UNITS_PER_WORD;
3328 offset = 0;
3329 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3330 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3332 offset += hfa_size;
3333 args_byte_size += hfa_size;
3334 fp_regs++;
3337 cum->fp_regs = fp_regs;
3340 /* Integral types and aggregates go in general registers.  So do TFmode FP values.
3341 If we have run out of FR registers, then other FP values must also go in
3342 general registers.  This can happen when we have an SFmode HFA.
3343 else if (mode == TFmode || mode == TCmode
3344 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3345 cum->int_regs = cum->words;
3347 /* If there is a prototype, then FP values go in a FR register when
3348 named, and in a GR register when unnamed. */
3349 else if (cum->prototype)
3351 if (! named)
3352 cum->int_regs = cum->words;
3353 else
3354 /* ??? Complex types should not reach here. */
3355 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3357 /* If there is no prototype, then FP values go in both FR and GR
3358 registers. */
3359 else
3361 /* ??? Complex types should not reach here. */
3362 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3363 cum->int_regs = cum->words;
3367 /* Arguments with alignment larger than 8 bytes start at the next even
3368 boundary.  On ILP32 HPUX, TFmode arguments start on the next even boundary
3369 even though their normal alignment is 8 bytes. See ia64_function_arg. */
3371 int
3372 ia64_function_arg_boundary (enum machine_mode mode, tree type)
3375 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
3376 return PARM_BOUNDARY * 2;
3378 if (type)
3380 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
3381 return PARM_BOUNDARY * 2;
3382 else
3383 return PARM_BOUNDARY;
3386 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
3387 return PARM_BOUNDARY * 2;
3388 else
3389 return PARM_BOUNDARY;
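/* For instance: a struct with 16-byte alignment, or TFmode under ILP32
   HP-UX, is aligned to PARM_BOUNDARY * 2 (twice the 64-bit argument slot
   size); an ordinary int or double stays at PARM_BOUNDARY.  */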
3392 /* Variable sized types are passed by reference. */
3393 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3395 static bool
3396 ia64_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3397 enum machine_mode mode ATTRIBUTE_UNUSED,
3398 tree type, bool named ATTRIBUTE_UNUSED)
3400 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
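/* In practice this catches variable-length array types and other
   variably sized types, whose TYPE_SIZE is not an INTEGER_CST;
   fixed-size aggregates, however large, are still passed by value.  */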
3403 /* True if it is OK to do sibling call optimization for the specified
3404 call expression EXP. DECL will be the called function, or NULL if
3405 this is an indirect call. */
3406 static bool
3407 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3409 /* We can't perform a sibcall if the current function has the syscall_linkage
3410 attribute. */
3411 if (lookup_attribute ("syscall_linkage",
3412 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
3413 return false;
3415 /* We must always return with our current GP. This means we can
3416 only sibcall to functions defined in the current module. */
3417 return decl && (*targetm.binds_local_p) (decl);
3421 /* Implement va_arg. */
3423 static tree
3424 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3426 /* Variable sized types are passed by reference. */
3427 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
3429 tree ptrtype = build_pointer_type (type);
3430 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3431 return build_va_arg_indirect_ref (addr);
3434 /* Aggregate arguments with alignment larger than 8 bytes start at
3435 the next even boundary. Integer and floating point arguments
3436 do so if they are larger than 8 bytes, whether or not they are
3437 also aligned larger than 8 bytes. */
3438 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3439 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3441 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3442 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
3443 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3444 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
3445 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3446 gimplify_and_add (t, pre_p);
3449 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3452 /* Return 1 if the function return value is returned in memory.  Return 0 if it is
3453 in a register. */
3455 static bool
3456 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3458 enum machine_mode mode;
3459 enum machine_mode hfa_mode;
3460 HOST_WIDE_INT byte_size;
3462 mode = TYPE_MODE (valtype);
3463 byte_size = GET_MODE_SIZE (mode);
3464 if (mode == BLKmode)
3466 byte_size = int_size_in_bytes (valtype);
3467 if (byte_size < 0)
3468 return true;
3471 /* HFAs with up to 8 elements are returned in the FP argument registers. */
3473 hfa_mode = hfa_element_mode (valtype, 0);
3474 if (hfa_mode != VOIDmode)
3476 int hfa_size = GET_MODE_SIZE (hfa_mode);
3478 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3479 return true;
3480 else
3481 return false;
3483 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3484 return true;
3485 else
3486 return false;
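/* A couple of illustrative cases: an HFA of eight doubles (eight
   elements) still comes back in FP registers, while an HFA of ten floats
   (ten elements, more than MAX_ARGUMENT_SLOTS) is returned in memory.
   A non-HFA aggregate goes to memory once it exceeds
   UNITS_PER_WORD * MAX_INT_RETURN_SLOTS bytes.  */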
3489 /* Return rtx for register that holds the function return value. */
3491 rtx
3492 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3494 enum machine_mode mode;
3495 enum machine_mode hfa_mode;
3497 mode = TYPE_MODE (valtype);
3498 hfa_mode = hfa_element_mode (valtype, 0);
3500 if (hfa_mode != VOIDmode)
3502 rtx loc[8];
3503 int i;
3504 int hfa_size;
3505 int byte_size;
3506 int offset;
3508 hfa_size = GET_MODE_SIZE (hfa_mode);
3509 byte_size = ((mode == BLKmode)
3510 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3511 offset = 0;
3512 for (i = 0; offset < byte_size; i++)
3514 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3515 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3516 GEN_INT (offset));
3517 offset += hfa_size;
3519 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3521 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
3522 return gen_rtx_REG (mode, FR_ARG_FIRST);
3523 else
3525 if (BYTES_BIG_ENDIAN
3526 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3528 rtx loc[8];
3529 int offset;
3530 int bytesize;
3531 int i;
3533 offset = 0;
3534 bytesize = int_size_in_bytes (valtype);
3535 for (i = 0; offset < bytesize; i++)
3537 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3538 gen_rtx_REG (DImode,
3539 GR_RET_FIRST + i),
3540 GEN_INT (offset));
3541 offset += UNITS_PER_WORD;
3543 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3545 else
3546 return gen_rtx_REG (mode, GR_RET_FIRST);
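/* Thus (illustrative): an HFA of four doubles comes back in four
   consecutive FP argument registers starting at FR_ARG_FIRST; a plain
   float or double in FR_ARG_FIRST alone; integers and little-endian
   aggregates in GR_RET_FIRST, while small big-endian aggregates are
   wrapped in a PARALLEL of DImode pieces so they land in the correct
   bytes of the return registers.  */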
3550 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3551 We need to emit DTP-relative relocations. */
3553 void
3554 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3556 if (size != 8)
3557 abort ();
3558 fputs ("\tdata8.ua\t@dtprel(", file);
3559 output_addr_const (file, x);
3560 fputs (")", file);
3563 /* Print a memory address as an operand to reference that memory location. */
3565 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3566 also call this from ia64_print_operand for memory addresses. */
3568 void
3569 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3570 rtx address ATTRIBUTE_UNUSED)
3574 /* Print an operand to an assembler instruction.
3575 C Swap and print a comparison operator.
3576 D Print an FP comparison operator.
3577 E Print 32 - constant, for SImode shifts as extract.
3578 e Print 64 - constant, for DImode rotates.
3579 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3580 a floating point register emitted normally.
3581 I Invert a predicate register by adding 1.
3582 J Select the proper predicate register for a condition.
3583 j Select the inverse predicate register for a condition.
3584 O Append .acq for volatile load.
3585 P Postincrement of a MEM.
3586 Q Append .rel for volatile store.
3587 S Shift amount for shladd instruction.
3588 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3589 for Intel assembler.
3590 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3591 for Intel assembler.
3592 r Print register name, or constant 0 as r0. HP compatibility for
3593 Linux kernel. */
3594 void
3595 ia64_print_operand (FILE * file, rtx x, int code)
3597 const char *str;
3599 switch (code)
3601 case 0:
3602 /* Handled below. */
3603 break;
3605 case 'C':
3607 enum rtx_code c = swap_condition (GET_CODE (x));
3608 fputs (GET_RTX_NAME (c), file);
3609 return;
3612 case 'D':
3613 switch (GET_CODE (x))
3615 case NE:
3616 str = "neq";
3617 break;
3618 case UNORDERED:
3619 str = "unord";
3620 break;
3621 case ORDERED:
3622 str = "ord";
3623 break;
3624 default:
3625 str = GET_RTX_NAME (GET_CODE (x));
3626 break;
3628 fputs (str, file);
3629 return;
3631 case 'E':
3632 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3633 return;
3635 case 'e':
3636 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3637 return;
3639 case 'F':
3640 if (x == CONST0_RTX (GET_MODE (x)))
3641 str = reg_names [FR_REG (0)];
3642 else if (x == CONST1_RTX (GET_MODE (x)))
3643 str = reg_names [FR_REG (1)];
3644 else if (GET_CODE (x) == REG)
3645 str = reg_names [REGNO (x)];
3646 else
3647 abort ();
3648 fputs (str, file);
3649 return;
3651 case 'I':
3652 fputs (reg_names [REGNO (x) + 1], file);
3653 return;
3655 case 'J':
3656 case 'j':
3658 unsigned int regno = REGNO (XEXP (x, 0));
3659 if (GET_CODE (x) == EQ)
3660 regno += 1;
3661 if (code == 'j')
3662 regno ^= 1;
3663 fputs (reg_names [regno], file);
3665 return;
3667 case 'O':
3668 if (MEM_VOLATILE_P (x))
3669 fputs(".acq", file);
3670 return;
3672 case 'P':
3674 HOST_WIDE_INT value;
3676 switch (GET_CODE (XEXP (x, 0)))
3678 default:
3679 return;
3681 case POST_MODIFY:
3682 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
3683 if (GET_CODE (x) == CONST_INT)
3684 value = INTVAL (x);
3685 else if (GET_CODE (x) == REG)
3687 fprintf (file, ", %s", reg_names[REGNO (x)]);
3688 return;
3690 else
3691 abort ();
3692 break;
3694 case POST_INC:
3695 value = GET_MODE_SIZE (GET_MODE (x));
3696 break;
3698 case POST_DEC:
3699 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
3700 break;
3703 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
3704 return;
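/* Thus, for a DImode access, %P prints ", 8" after a POST_INC and ", -8"
   after a POST_DEC, and prints the constant or register (e.g. ", r3") for a
   POST_MODIFY, which lets templates spell out the auto-increment form of a
   load or store.  */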
3707 case 'Q':
3708 if (MEM_VOLATILE_P (x))
3709 fputs(".rel", file);
3710 return;
3712 case 'S':
3713 fprintf (file, "%d", exact_log2 (INTVAL (x)));
3714 return;
3716 case 'T':
3717 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3719 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
3720 return;
3722 break;
3724 case 'U':
3725 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3727 const char *prefix = "0x";
3728 if (INTVAL (x) & 0x80000000)
3730 fprintf (file, "0xffffffff");
3731 prefix = "";
3733 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
3734 return;
3736 break;
3738 case 'r':
3739 /* If this operand is the constant zero, write it as register zero.
3740 Any register, zero, or CONST_INT value is OK here. */
3741 if (GET_CODE (x) == REG)
3742 fputs (reg_names[REGNO (x)], file);
3743 else if (x == CONST0_RTX (GET_MODE (x)))
3744 fputs ("r0", file);
3745 else if (GET_CODE (x) == CONST_INT)
3746 output_addr_const (file, x);
3747 else
3748 output_operand_lossage ("invalid %%r value");
3749 return;
3751 case '+':
3753 const char *which;
3755 /* For conditional branches, returns or calls, substitute
3756 sptk, dptk, dpnt, or spnt for %s. */
3757 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
3758 if (x)
3760 int pred_val = INTVAL (XEXP (x, 0));
3762 /* Guess that the top and bottom 2% are statically predicted. */
3763 if (pred_val < REG_BR_PROB_BASE / 50)
3764 which = ".spnt";
3765 else if (pred_val < REG_BR_PROB_BASE / 2)
3766 which = ".dpnt";
3767 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
3768 which = ".dptk";
3769 else
3770 which = ".sptk";
3772 else if (GET_CODE (current_output_insn) == CALL_INSN)
3773 which = ".sptk";
3774 else
3775 which = ".dptk";
3777 fputs (which, file);
3778 return;
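/* With REG_BR_PROB_BASE equal to 10000, the cutoffs above are 200, 5000 and
   9800: branches predicted taken with probability below 2% get .spnt, below
   50% get .dpnt, below 98% get .dptk, and anything hotter gets .sptk.  */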
3781 case ',':
3782 x = current_insn_predicate;
3783 if (x)
3785 unsigned int regno = REGNO (XEXP (x, 0));
3786 if (GET_CODE (x) == EQ)
3787 regno += 1;
3788 fprintf (file, "(%s) ", reg_names [regno]);
3790 return;
3792 default:
3793 output_operand_lossage ("ia64_print_operand: unknown code");
3794 return;
3797 switch (GET_CODE (x))
3799 /* This happens for the spill/restore instructions. */
3800 case POST_INC:
3801 case POST_DEC:
3802 case POST_MODIFY:
3803 x = XEXP (x, 0);
3804 /* ... fall through ... */
3806 case REG:
3807 fputs (reg_names [REGNO (x)], file);
3808 break;
3810 case MEM:
3812 rtx addr = XEXP (x, 0);
3813 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3814 addr = XEXP (addr, 0);
3815 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
3816 break;
3819 default:
3820 output_addr_const (file, x);
3821 break;
3824 return;
3827 /* Compute a (partial) cost for rtx X. Return true if the complete
3828 cost has been computed, and false if subexpressions should be
3829 scanned. In either case, *TOTAL contains the cost result. */
3830 /* ??? This is incomplete. */
3832 static bool
3833 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
3835 switch (code)
3837 case CONST_INT:
3838 switch (outer_code)
3840 case SET:
3841 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
3842 return true;
3843 case PLUS:
3844 if (CONST_OK_FOR_I (INTVAL (x)))
3845 *total = 0;
3846 else if (CONST_OK_FOR_J (INTVAL (x)))
3847 *total = 1;
3848 else
3849 *total = COSTS_N_INSNS (1);
3850 return true;
3851 default:
3852 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
3853 *total = 0;
3854 else
3855 *total = COSTS_N_INSNS (1);
3856 return true;
3859 case CONST_DOUBLE:
3860 *total = COSTS_N_INSNS (1);
3861 return true;
3863 case CONST:
3864 case SYMBOL_REF:
3865 case LABEL_REF:
3866 *total = COSTS_N_INSNS (3);
3867 return true;
3869 case MULT:
3870 /* For multiplies wider than HImode, we have to go to the FPU,
3871 which normally involves copies. Plus there's the latency
3872 of the multiply itself, and the latency of the instructions to
3873 transfer integer regs to FP regs. */
3874 /* ??? Check for FP mode. */
3875 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
3876 *total = COSTS_N_INSNS (10);
3877 else
3878 *total = COSTS_N_INSNS (2);
3879 return true;
3881 case PLUS:
3882 case MINUS:
3883 case ASHIFT:
3884 case ASHIFTRT:
3885 case LSHIFTRT:
3886 *total = COSTS_N_INSNS (1);
3887 return true;
3889 case DIV:
3890 case UDIV:
3891 case MOD:
3892 case UMOD:
3893 /* We make divide expensive, so that divide-by-constant will be
3894 optimized to a multiply. */
3895 *total = COSTS_N_INSNS (60);
3896 return true;
3898 default:
3899 return false;
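/* One consequence of the large DIV/MOD cost above: a division by a constant
   such as x / 10 is expanded as a multiply (high part) plus shifts, which is
   charged only the MULT cost, rather than as a real divide.  */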
3903 /* Calculate the cost of moving data from a register in class FROM to
3904 one in class TO, using MODE. */
3907 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
3908 enum reg_class to)
3910 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
3911 if (to == ADDL_REGS)
3912 to = GR_REGS;
3913 if (from == ADDL_REGS)
3914 from = GR_REGS;
3916 /* All costs are symmetric, so reduce cases by putting the
3917 lower number class as the destination. */
3918 if (from < to)
3920 enum reg_class tmp = to;
3921 to = from, from = tmp;
3924 /* Moving from FR<->GR in XFmode must be more expensive than 2,
3925 so that we get secondary memory reloads. Between FR_REGS,
3926 we have to make this at least as expensive as MEMORY_MOVE_COST
3927 to avoid spectacularly poor register class preferencing. */
3928 if (mode == XFmode)
3930 if (to != GR_REGS || from != GR_REGS)
3931 return MEMORY_MOVE_COST (mode, to, 0);
3932 else
3933 return 3;
3936 switch (to)
3938 case PR_REGS:
3939 /* Moving between PR registers takes two insns. */
3940 if (from == PR_REGS)
3941 return 3;
3942 /* Moving between PR and anything but GR is impossible. */
3943 if (from != GR_REGS)
3944 return MEMORY_MOVE_COST (mode, to, 0);
3945 break;
3947 case BR_REGS:
3948 /* Moving between BR and anything but GR is impossible. */
3949 if (from != GR_REGS && from != GR_AND_BR_REGS)
3950 return MEMORY_MOVE_COST (mode, to, 0);
3951 break;
3953 case AR_I_REGS:
3954 case AR_M_REGS:
3955 /* Moving between AR and anything but GR is impossible. */
3956 if (from != GR_REGS)
3957 return MEMORY_MOVE_COST (mode, to, 0);
3958 break;
3960 case GR_REGS:
3961 case FR_REGS:
3962 case GR_AND_FR_REGS:
3963 case GR_AND_BR_REGS:
3964 case ALL_REGS:
3965 break;
3967 default:
3968 abort ();
3971 return 2;
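/* Some resulting costs: moves between general and FP registers in ordinary
   modes cost 2, a PR<->PR copy costs 3 since it takes two insns, and any
   XFmode move other than GR<->GR is priced at MEMORY_MOVE_COST so that
   reload routes it through memory.  */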
3974 /* This function returns the register class required for a secondary
3975 register when copying between one of the registers in CLASS, and X,
3976 using MODE. A return value of NO_REGS means that no secondary register
3977 is required. */
3979 enum reg_class
3980 ia64_secondary_reload_class (enum reg_class class,
3981 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3983 int regno = -1;
3985 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3986 regno = true_regnum (x);
3988 switch (class)
3990 case BR_REGS:
3991 case AR_M_REGS:
3992 case AR_I_REGS:
3993 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
3994 interaction. We end up with two pseudos with overlapping lifetimes
3995 both of which are equiv to the same constant, and both which need
3996 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
3997 changes depending on the path length, which means the qty_first_reg
3998 check in make_regs_eqv can give different answers at different times.
3999 At some point I'll probably need a reload_indi pattern to handle
4000 this.
4002 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4003 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4004 non-general registers for good measure. */
4005 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4006 return GR_REGS;
4008 /* This is needed if a pseudo used as a call_operand gets spilled to a
4009 stack slot. */
4010 if (GET_CODE (x) == MEM)
4011 return GR_REGS;
4012 break;
4014 case FR_REGS:
4015 /* Need to go through general registers to get to other class regs. */
4016 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4017 return GR_REGS;
4019 /* This can happen when a paradoxical subreg is an operand to the
4020 muldi3 pattern. */
4021 /* ??? This shouldn't be necessary after instruction scheduling is
4022 enabled, because paradoxical subregs are not accepted by
4023 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4024 stop the paradoxical subreg stupidity in the *_operand functions
4025 in recog.c. */
4026 if (GET_CODE (x) == MEM
4027 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4028 || GET_MODE (x) == QImode))
4029 return GR_REGS;
4031 /* This can happen because of the ior/and/etc patterns that accept FP
4032 registers as operands. If the third operand is a constant, then it
4033 needs to be reloaded into a FP register. */
4034 if (GET_CODE (x) == CONST_INT)
4035 return GR_REGS;
4037 /* This can happen because of register elimination in a muldi3 insn.
4038 E.g. `26107 * (unsigned long)&u'. */
4039 if (GET_CODE (x) == PLUS)
4040 return GR_REGS;
4041 break;
4043 case PR_REGS:
4044 /* ??? This happens if we cse/gcse a BImode value across a call,
4045 and the function has a nonlocal goto. This is because global
4046 does not allocate call crossing pseudos to hard registers when
4047 current_function_has_nonlocal_goto is true. This is relatively
4048 common for C++ programs that use exceptions. To reproduce,
4049 return NO_REGS and compile libstdc++. */
4050 if (GET_CODE (x) == MEM)
4051 return GR_REGS;
4053 /* This can happen when we take a BImode subreg of a DImode value,
4054 and that DImode value winds up in some non-GR register. */
4055 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4056 return GR_REGS;
4057 break;
4059 default:
4060 break;
4063 return NO_REGS;
4067 /* Emit text to declare externally defined variables and functions, because
4068 the Intel assembler does not support undefined externals. */
4070 void
4071 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4073 int save_referenced;
4075 /* GNU as does not need anything here, but the HP linker does need
4076 something for external functions. */
4078 if (TARGET_GNU_AS
4079 && (!TARGET_HPUX_LD
4080 || TREE_CODE (decl) != FUNCTION_DECL
4081 || strstr (name, "__builtin_") == name))
4082 return;
4084 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4085 the linker when we do this, so we need to be careful not to do this for
4086 builtin functions which have no library equivalent. Unfortunately, we
4087 can't tell here whether or not a function will actually be called by
4088 expand_expr, so we pull in library functions even if we may not need
4089 them later. */
4090 if (! strcmp (name, "__builtin_next_arg")
4091 || ! strcmp (name, "alloca")
4092 || ! strcmp (name, "__builtin_constant_p")
4093 || ! strcmp (name, "__builtin_args_info"))
4094 return;
4096 if (TARGET_HPUX_LD)
4097 ia64_hpux_add_extern_decl (decl);
4098 else
4100 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4101 restore it. */
4102 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4103 if (TREE_CODE (decl) == FUNCTION_DECL)
4104 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4105 (*targetm.asm_out.globalize_label) (file, name);
4106 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4110 /* Parse the -mfixed-range= option string. */
4112 static void
4113 fix_range (const char *const_str)
4115 int i, first, last;
4116 char *str, *dash, *comma;
4118 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4119 REG2 are either register names or register numbers. The effect
4120 of this option is to mark the registers in the range from REG1 to
4121 REG2 as ``fixed'' so they won't be used by the compiler. This is
4122 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
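/* A typical use is -mfixed-range=f32-f127, which keeps generated code away
   from the upper FP registers; several ranges may be given, as in
   -mfixed-range=f12-f15,f32-f127 (an illustrative combination).  */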
4124 i = strlen (const_str);
4125 str = (char *) alloca (i + 1);
4126 memcpy (str, const_str, i + 1);
4128 while (1)
4130 dash = strchr (str, '-');
4131 if (!dash)
4133 warning ("value of -mfixed-range must have form REG1-REG2");
4134 return;
4136 *dash = '\0';
4138 comma = strchr (dash + 1, ',');
4139 if (comma)
4140 *comma = '\0';
4142 first = decode_reg_name (str);
4143 if (first < 0)
4145 warning ("unknown register name: %s", str);
4146 return;
4149 last = decode_reg_name (dash + 1);
4150 if (last < 0)
4152 warning ("unknown register name: %s", dash + 1);
4153 return;
4156 *dash = '-';
4158 if (first > last)
4160 warning ("%s-%s is an empty range", str, dash + 1);
4161 return;
4164 for (i = first; i <= last; ++i)
4165 fixed_regs[i] = call_used_regs[i] = 1;
4167 if (!comma)
4168 break;
4170 *comma = ',';
4171 str = comma + 1;
4175 static struct machine_function *
4176 ia64_init_machine_status (void)
4178 return ggc_alloc_cleared (sizeof (struct machine_function));
4181 /* Handle TARGET_OPTIONS switches. */
4183 void
4184 ia64_override_options (void)
4186 static struct pta
4188 const char *const name; /* processor name or nickname. */
4189 const enum processor_type processor;
4191 const processor_alias_table[] =
4193 {"itanium", PROCESSOR_ITANIUM},
4194 {"itanium1", PROCESSOR_ITANIUM},
4195 {"merced", PROCESSOR_ITANIUM},
4196 {"itanium2", PROCESSOR_ITANIUM2},
4197 {"mckinley", PROCESSOR_ITANIUM2},
4200 int const pta_size = ARRAY_SIZE (processor_alias_table);
4201 int i;
4203 if (TARGET_AUTO_PIC)
4204 target_flags |= MASK_CONST_GP;
4206 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4208 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4209 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4211 warning ("cannot optimize floating point division for both latency and throughput");
4212 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4214 else
4216 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4217 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4218 else
4219 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4223 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4225 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4226 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4228 warning ("cannot optimize integer division for both latency and throughput");
4229 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4231 else
4233 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4234 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4235 else
4236 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4240 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4242 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4243 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4245 warning ("cannot optimize square root for both latency and throughput");
4246 target_flags &= ~MASK_INLINE_SQRT_THR;
4248 else
4250 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4251 target_flags &= ~MASK_INLINE_SQRT_LAT;
4252 else
4253 target_flags &= ~MASK_INLINE_SQRT_THR;
4257 if (TARGET_INLINE_SQRT_LAT)
4259 warning ("not yet implemented: latency-optimized inline square root");
4260 target_flags &= ~MASK_INLINE_SQRT_LAT;
4263 if (ia64_fixed_range_string)
4264 fix_range (ia64_fixed_range_string);
4266 if (ia64_tls_size_string)
4268 char *end;
4269 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4270 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4271 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4272 else
4273 ia64_tls_size = tmp;
4276 if (!ia64_tune_string)
4277 ia64_tune_string = "itanium2";
4279 for (i = 0; i < pta_size; i++)
4280 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4282 ia64_tune = processor_alias_table[i].processor;
4283 break;
4286 if (i == pta_size)
4287 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4289 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4290 flag_schedule_insns_after_reload = 0;
4292 /* Variable tracking should be run after all optimizations which change order
4293 of insns. It also needs a valid CFG. */
4294 ia64_flag_var_tracking = flag_var_tracking;
4295 flag_var_tracking = 0;
4297 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4299 init_machine_status = ia64_init_machine_status;
4302 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4303 static enum attr_type ia64_safe_type (rtx);
4305 static enum attr_itanium_class
4306 ia64_safe_itanium_class (rtx insn)
4308 if (recog_memoized (insn) >= 0)
4309 return get_attr_itanium_class (insn);
4310 else
4311 return ITANIUM_CLASS_UNKNOWN;
4314 static enum attr_type
4315 ia64_safe_type (rtx insn)
4317 if (recog_memoized (insn) >= 0)
4318 return get_attr_type (insn);
4319 else
4320 return TYPE_UNKNOWN;
4323 /* The following collection of routines emit instruction group stop bits as
4324 necessary to avoid dependencies. */
4326 /* Need to track some additional registers as far as serialization is
4327 concerned so we can properly handle br.call and br.ret. We could
4328 make these registers visible to gcc, but since these registers are
4329 never explicitly used in gcc generated code, it seems wasteful to
4330 do so (plus it would make the call and return patterns needlessly
4331 complex). */
4332 #define REG_RP (BR_REG (0))
4333 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4334 /* This is used for volatile asms which may require a stop bit immediately
4335 before and after them. */
4336 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4337 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4338 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4340 /* For each register, we keep track of how it has been written in the
4341 current instruction group.
4343 If a register is written unconditionally (no qualifying predicate),
4344 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4346 If a register is written if its qualifying predicate P is true, we
4347 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4348 may be written again by the complement of P (P^1) and when this happens,
4349 WRITE_COUNT gets set to 2.
4351 The result of this is that whenever an insn attempts to write a register
4352 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4354 If a predicate register is written by a floating-point insn, we set
4355 WRITTEN_BY_FP to true.
4357 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4358 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
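/* For instance, after "(p6) mov r14 = 1" the entry for r14 has WRITE_COUNT 1
   and FIRST_PRED p6; a subsequent "(p7) mov r14 = 2" under the complementary
   predicate raises WRITE_COUNT to 2 without needing a barrier, but any
   further write to r14 in the same group must be preceded by a stop bit.
   (Register and predicate numbers here are only illustrative.)  */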
4360 struct reg_write_state
4362 unsigned int write_count : 2;
4363 unsigned int first_pred : 16;
4364 unsigned int written_by_fp : 1;
4365 unsigned int written_by_and : 1;
4366 unsigned int written_by_or : 1;
4369 /* Cumulative info for the current instruction group. */
4370 struct reg_write_state rws_sum[NUM_REGS];
4371 /* Info for the current instruction. This gets copied to rws_sum after a
4372 stop bit is emitted. */
4373 struct reg_write_state rws_insn[NUM_REGS];
4375 /* Indicates whether this is the first instruction after a stop bit,
4376 in which case we don't need another stop bit. Without this, we hit
4377 the abort in ia64_variable_issue when scheduling an alloc. */
4378 static int first_instruction;
4380 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4381 RTL for one instruction. */
4382 struct reg_flags
4384 unsigned int is_write : 1; /* Is register being written? */
4385 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4386 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4387 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4388 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4389 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4392 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4393 static int rws_access_regno (int, struct reg_flags, int);
4394 static int rws_access_reg (rtx, struct reg_flags, int);
4395 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4396 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4397 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4398 static void init_insn_group_barriers (void);
4399 static int group_barrier_needed_p (rtx);
4400 static int safe_group_barrier_needed_p (rtx);
4402 /* Update *RWS for REGNO, which is being written by the current instruction,
4403 with predicate PRED, and associated register flags in FLAGS. */
4405 static void
4406 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4408 if (pred)
4409 rws[regno].write_count++;
4410 else
4411 rws[regno].write_count = 2;
4412 rws[regno].written_by_fp |= flags.is_fp;
4413 /* ??? Not tracking and/or across differing predicates. */
4414 rws[regno].written_by_and = flags.is_and;
4415 rws[regno].written_by_or = flags.is_or;
4416 rws[regno].first_pred = pred;
4419 /* Handle an access to register REGNO of type FLAGS using predicate register
4420 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4421 a dependency with an earlier instruction in the same group. */
4423 static int
4424 rws_access_regno (int regno, struct reg_flags flags, int pred)
4426 int need_barrier = 0;
4428 if (regno >= NUM_REGS)
4429 abort ();
4431 if (! PR_REGNO_P (regno))
4432 flags.is_and = flags.is_or = 0;
4434 if (flags.is_write)
4436 int write_count;
4438 /* One insn writes same reg multiple times? */
4439 if (rws_insn[regno].write_count > 0)
4440 abort ();
4442 /* Update info for current instruction. */
4443 rws_update (rws_insn, regno, flags, pred);
4444 write_count = rws_sum[regno].write_count;
4446 switch (write_count)
4448 case 0:
4449 /* The register has not been written yet. */
4450 rws_update (rws_sum, regno, flags, pred);
4451 break;
4453 case 1:
4454 /* The register has been written via a predicate. If this is
4455 not a complementary predicate, then we need a barrier. */
4456 /* ??? This assumes that P and P+1 are always complementary
4457 predicates for P even. */
4458 if (flags.is_and && rws_sum[regno].written_by_and)
4460 else if (flags.is_or && rws_sum[regno].written_by_or)
4462 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4463 need_barrier = 1;
4464 rws_update (rws_sum, regno, flags, pred);
4465 break;
4467 case 2:
4468 /* The register has been unconditionally written already. We
4469 need a barrier. */
4470 if (flags.is_and && rws_sum[regno].written_by_and)
4472 else if (flags.is_or && rws_sum[regno].written_by_or)
4474 else
4475 need_barrier = 1;
4476 rws_sum[regno].written_by_and = flags.is_and;
4477 rws_sum[regno].written_by_or = flags.is_or;
4478 break;
4480 default:
4481 abort ();
4484 else
4486 if (flags.is_branch)
4488 /* Branches have several RAW exceptions that allow us to avoid
4489 barriers. */
4491 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4492 /* RAW dependencies on branch regs are permissible as long
4493 as the writer is a non-branch instruction. Since we
4494 never generate code that uses a branch register written
4495 by a branch instruction, handling this case is
4496 easy. */
4497 return 0;
4499 if (REGNO_REG_CLASS (regno) == PR_REGS
4500 && ! rws_sum[regno].written_by_fp)
4501 /* The predicates of a branch are available within the
4502 same insn group as long as the predicate was written by
4503 something other than a floating-point instruction. */
4504 return 0;
4507 if (flags.is_and && rws_sum[regno].written_by_and)
4508 return 0;
4509 if (flags.is_or && rws_sum[regno].written_by_or)
4510 return 0;
4512 switch (rws_sum[regno].write_count)
4514 case 0:
4515 /* The register has not been written yet. */
4516 break;
4518 case 1:
4519 /* The register has been written via a predicate. If this is
4520 not a complementary predicate, then we need a barrier. */
4521 /* ??? This assumes that P and P+1 are always complementary
4522 predicates for P even. */
4523 if ((rws_sum[regno].first_pred ^ 1) != pred)
4524 need_barrier = 1;
4525 break;
4527 case 2:
4528 /* The register has been unconditionally written already. We
4529 need a barrier. */
4530 need_barrier = 1;
4531 break;
4533 default:
4534 abort ();
4538 return need_barrier;
4541 static int
4542 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4544 int regno = REGNO (reg);
4545 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4547 if (n == 1)
4548 return rws_access_regno (regno, flags, pred);
4549 else
4551 int need_barrier = 0;
4552 while (--n >= 0)
4553 need_barrier |= rws_access_regno (regno + n, flags, pred);
4554 return need_barrier;
4558 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4559 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4561 static void
4562 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4564 rtx src = SET_SRC (x);
4566 *pcond = 0;
4568 switch (GET_CODE (src))
4570 case CALL:
4571 return;
4573 case IF_THEN_ELSE:
4574 if (SET_DEST (x) == pc_rtx)
4575 /* X is a conditional branch. */
4576 return;
4577 else
4579 int is_complemented = 0;
4581 /* X is a conditional move. */
4582 rtx cond = XEXP (src, 0);
4583 if (GET_CODE (cond) == EQ)
4584 is_complemented = 1;
4585 cond = XEXP (cond, 0);
4586 if (GET_CODE (cond) != REG
4587 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4588 abort ();
4589 *pcond = cond;
4590 if (XEXP (src, 1) == SET_DEST (x)
4591 || XEXP (src, 2) == SET_DEST (x))
4593 /* X is a conditional move that conditionally writes the
4594 destination. */
4596 /* We need another complement in this case. */
4597 if (XEXP (src, 1) == SET_DEST (x))
4598 is_complemented = ! is_complemented;
4600 *ppred = REGNO (cond);
4601 if (is_complemented)
4602 ++*ppred;
4605 /* ??? If this is a conditional write to the dest, then this
4606 instruction does not actually read one source. This probably
4607 doesn't matter, because that source is also the dest. */
4608 /* ??? Multiple writes to predicate registers are allowed
4609 if they are all AND type compares, or if they are all OR
4610 type compares. We do not generate such instructions
4611 currently. */
4613 /* ... fall through ... */
4615 default:
4616 if (COMPARISON_P (src)
4617 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
4618 /* Set pflags->is_fp to 1 so that we know we're dealing
4619 with a floating point comparison when processing the
4620 destination of the SET. */
4621 pflags->is_fp = 1;
4623 /* Discover if this is a parallel comparison. We only handle
4624 and.orcm and or.andcm at present, since we must retain a
4625 strict inverse on the predicate pair. */
4626 else if (GET_CODE (src) == AND)
4627 pflags->is_and = 1;
4628 else if (GET_CODE (src) == IOR)
4629 pflags->is_or = 1;
4631 break;
4635 /* Subroutine of rtx_needs_barrier; this function determines whether the
4636 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
4637 are as in rtx_needs_barrier. COND is an rtx that holds the condition
4638 for this insn. */
4640 static int
4641 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
4643 int need_barrier = 0;
4644 rtx dst;
4645 rtx src = SET_SRC (x);
4647 if (GET_CODE (src) == CALL)
4648 /* We don't need to worry about the result registers that
4649 get written by a subroutine call. */
4650 return rtx_needs_barrier (src, flags, pred);
4651 else if (SET_DEST (x) == pc_rtx)
4653 /* X is a conditional branch. */
4654 /* ??? This seems redundant, as the caller sets this bit for
4655 all JUMP_INSNs. */
4656 flags.is_branch = 1;
4657 return rtx_needs_barrier (src, flags, pred);
4660 need_barrier = rtx_needs_barrier (src, flags, pred);
4662 /* This instruction unconditionally uses a predicate register. */
4663 if (cond)
4664 need_barrier |= rws_access_reg (cond, flags, 0);
4666 dst = SET_DEST (x);
4667 if (GET_CODE (dst) == ZERO_EXTRACT)
4669 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
4670 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
4671 dst = XEXP (dst, 0);
4673 return need_barrier;
4676 /* Handle an access to rtx X of type FLAGS using predicate register
4677 PRED. Return 1 if this access creates a dependency with an earlier
4678 instruction in the same group. */
4680 static int
4681 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
4683 int i, j;
4684 int is_complemented = 0;
4685 int need_barrier = 0;
4686 const char *format_ptr;
4687 struct reg_flags new_flags;
4688 rtx cond = 0;
4690 if (! x)
4691 return 0;
4693 new_flags = flags;
4695 switch (GET_CODE (x))
4697 case SET:
4698 update_set_flags (x, &new_flags, &pred, &cond);
4699 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
4700 if (GET_CODE (SET_SRC (x)) != CALL)
4702 new_flags.is_write = 1;
4703 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
4705 break;
4707 case CALL:
4708 new_flags.is_write = 0;
4709 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4711 /* Avoid multiple register writes, in case this is a pattern with
4712 multiple CALL rtx. This avoids an abort in rws_access_reg. */
4713 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
4715 new_flags.is_write = 1;
4716 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
4717 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
4718 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4720 break;
4722 case COND_EXEC:
4723 /* X is a predicated instruction. */
4725 cond = COND_EXEC_TEST (x);
4726 if (pred)
4727 abort ();
4728 need_barrier = rtx_needs_barrier (cond, flags, 0);
4730 if (GET_CODE (cond) == EQ)
4731 is_complemented = 1;
4732 cond = XEXP (cond, 0);
4733 if (GET_CODE (cond) != REG
4734 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4735 abort ();
4736 pred = REGNO (cond);
4737 if (is_complemented)
4738 ++pred;
4740 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
4741 return need_barrier;
4743 case CLOBBER:
4744 case USE:
4745 /* Clobber & use are for earlier compiler-phases only. */
4746 break;
4748 case ASM_OPERANDS:
4749 case ASM_INPUT:
4750 /* We always emit stop bits for traditional asms. We emit stop bits
4751 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
4752 if (GET_CODE (x) != ASM_OPERANDS
4753 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
4755 /* Avoid writing the register multiple times if we have multiple
4756 asm outputs. This avoids an abort in rws_access_reg. */
4757 if (! rws_insn[REG_VOLATILE].write_count)
4759 new_flags.is_write = 1;
4760 rws_access_regno (REG_VOLATILE, new_flags, pred);
4762 return 1;
4765 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
4766 We cannot just fall through here, since then we would be confused
4767 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
4768 usage, does not indicate a traditional asm here. */
4770 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
4771 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
4772 need_barrier = 1;
4773 break;
4775 case PARALLEL:
4776 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4778 rtx pat = XVECEXP (x, 0, i);
4779 if (GET_CODE (pat) == SET)
4781 update_set_flags (pat, &new_flags, &pred, &cond);
4782 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
4784 else if (GET_CODE (pat) == USE
4785 || GET_CODE (pat) == CALL
4786 || GET_CODE (pat) == ASM_OPERANDS)
4787 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4788 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
4789 abort ();
4791 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4793 rtx pat = XVECEXP (x, 0, i);
4794 if (GET_CODE (pat) == SET)
4796 if (GET_CODE (SET_SRC (pat)) != CALL)
4798 new_flags.is_write = 1;
4799 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
4800 pred);
4803 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
4804 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4806 break;
4808 case SUBREG:
4809 x = SUBREG_REG (x);
4810 /* FALLTHRU */
4811 case REG:
4812 if (REGNO (x) == AR_UNAT_REGNUM)
4814 for (i = 0; i < 64; ++i)
4815 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
4817 else
4818 need_barrier = rws_access_reg (x, flags, pred);
4819 break;
4821 case MEM:
4822 /* Find the regs used in memory address computation. */
4823 new_flags.is_write = 0;
4824 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4825 break;
4827 case CONST_INT: case CONST_DOUBLE:
4828 case SYMBOL_REF: case LABEL_REF: case CONST:
4829 break;
4831 /* Operators with side-effects. */
4832 case POST_INC: case POST_DEC:
4833 if (GET_CODE (XEXP (x, 0)) != REG)
4834 abort ();
4836 new_flags.is_write = 0;
4837 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4838 new_flags.is_write = 1;
4839 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4840 break;
4842 case POST_MODIFY:
4843 if (GET_CODE (XEXP (x, 0)) != REG)
4844 abort ();
4846 new_flags.is_write = 0;
4847 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4848 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4849 new_flags.is_write = 1;
4850 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4851 break;
4853 /* Handle common unary and binary ops for efficiency. */
4854 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
4855 case MOD: case UDIV: case UMOD: case AND: case IOR:
4856 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
4857 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
4858 case NE: case EQ: case GE: case GT: case LE:
4859 case LT: case GEU: case GTU: case LEU: case LTU:
4860 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4861 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4862 break;
4864 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
4865 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
4866 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
4867 case SQRT: case FFS: case POPCOUNT:
4868 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
4869 break;
4871 case UNSPEC:
4872 switch (XINT (x, 1))
4874 case UNSPEC_LTOFF_DTPMOD:
4875 case UNSPEC_LTOFF_DTPREL:
4876 case UNSPEC_DTPREL:
4877 case UNSPEC_LTOFF_TPREL:
4878 case UNSPEC_TPREL:
4879 case UNSPEC_PRED_REL_MUTEX:
4880 case UNSPEC_PIC_CALL:
4881 case UNSPEC_MF:
4882 case UNSPEC_FETCHADD_ACQ:
4883 case UNSPEC_BSP_VALUE:
4884 case UNSPEC_FLUSHRS:
4885 case UNSPEC_BUNDLE_SELECTOR:
4886 break;
4888 case UNSPEC_GR_SPILL:
4889 case UNSPEC_GR_RESTORE:
4891 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
4892 HOST_WIDE_INT bit = (offset >> 3) & 63;
4894 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4895 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
4896 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
4897 new_flags, pred);
4898 break;
4901 case UNSPEC_FR_SPILL:
4902 case UNSPEC_FR_RESTORE:
4903 case UNSPEC_GETF_EXP:
4904 case UNSPEC_SETF_EXP:
4905 case UNSPEC_ADDP4:
4906 case UNSPEC_FR_SQRT_RECIP_APPROX:
4907 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4908 break;
4910 case UNSPEC_FR_RECIP_APPROX:
4911 case UNSPEC_SHRP:
4912 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4913 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4914 break;
4916 case UNSPEC_CMPXCHG_ACQ:
4917 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4918 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
4919 break;
4921 default:
4922 abort ();
4924 break;
4926 case UNSPEC_VOLATILE:
4927 switch (XINT (x, 1))
4929 case UNSPECV_ALLOC:
4930 /* Alloc must always be the first instruction of a group.
4931 We force this by always returning true. */
4932 /* ??? We might get better scheduling if we explicitly check for
4933 input/local/output register dependencies, and modify the
4934 scheduler so that alloc is always reordered to the start of
4935 the current group. We could then eliminate all of the
4936 first_instruction code. */
4937 rws_access_regno (AR_PFS_REGNUM, flags, pred);
4939 new_flags.is_write = 1;
4940 rws_access_regno (REG_AR_CFM, new_flags, pred);
4941 return 1;
4943 case UNSPECV_SET_BSP:
4944 need_barrier = 1;
4945 break;
4947 case UNSPECV_BLOCKAGE:
4948 case UNSPECV_INSN_GROUP_BARRIER:
4949 case UNSPECV_BREAK:
4950 case UNSPECV_PSAC_ALL:
4951 case UNSPECV_PSAC_NORMAL:
4952 return 0;
4954 default:
4955 abort ();
4957 break;
4959 case RETURN:
4960 new_flags.is_write = 0;
4961 need_barrier = rws_access_regno (REG_RP, flags, pred);
4962 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
4964 new_flags.is_write = 1;
4965 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4966 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4967 break;
4969 default:
4970 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
4971 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4972 switch (format_ptr[i])
4974 case '0': /* unused field */
4975 case 'i': /* integer */
4976 case 'n': /* note */
4977 case 'w': /* wide integer */
4978 case 's': /* pointer to string */
4979 case 'S': /* optional pointer to string */
4980 break;
4982 case 'e':
4983 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
4984 need_barrier = 1;
4985 break;
4987 case 'E':
4988 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
4989 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
4990 need_barrier = 1;
4991 break;
4993 default:
4994 abort ();
4996 break;
4998 return need_barrier;
5001 /* Clear out the state for group_barrier_needed_p at the start of a
5002 sequence of insns. */
5004 static void
5005 init_insn_group_barriers (void)
5007 memset (rws_sum, 0, sizeof (rws_sum));
5008 first_instruction = 1;
5011 /* Given the current state, recorded by previous calls to this function,
5012 determine whether a group barrier (a stop bit) is necessary before INSN.
5013 Return nonzero if so. */
5015 static int
5016 group_barrier_needed_p (rtx insn)
5018 rtx pat;
5019 int need_barrier = 0;
5020 struct reg_flags flags;
5022 memset (&flags, 0, sizeof (flags));
5023 switch (GET_CODE (insn))
5025 case NOTE:
5026 break;
5028 case BARRIER:
5029 /* A barrier doesn't imply an instruction group boundary. */
5030 break;
5032 case CODE_LABEL:
5033 memset (rws_insn, 0, sizeof (rws_insn));
5034 return 1;
5036 case CALL_INSN:
5037 flags.is_branch = 1;
5038 flags.is_sibcall = SIBLING_CALL_P (insn);
5039 memset (rws_insn, 0, sizeof (rws_insn));
5041 /* Don't bundle a call following another call. */
5042 if ((pat = prev_active_insn (insn))
5043 && GET_CODE (pat) == CALL_INSN)
5045 need_barrier = 1;
5046 break;
5049 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5050 break;
5052 case JUMP_INSN:
5053 flags.is_branch = 1;
5055 /* Don't bundle a jump following a call. */
5056 if ((pat = prev_active_insn (insn))
5057 && GET_CODE (pat) == CALL_INSN)
5059 need_barrier = 1;
5060 break;
5062 /* FALLTHRU */
5064 case INSN:
5065 if (GET_CODE (PATTERN (insn)) == USE
5066 || GET_CODE (PATTERN (insn)) == CLOBBER)
5067 /* Don't care about USE and CLOBBER "insns"---those are used to
5068 indicate to the optimizer that it shouldn't get rid of
5069 certain operations. */
5070 break;
5072 pat = PATTERN (insn);
5074 /* Ug. Hack hacks hacked elsewhere. */
5075 switch (recog_memoized (insn))
5077 /* We play dependency tricks with the epilogue in order
5078 to get proper schedules. Undo this for dv analysis. */
5079 case CODE_FOR_epilogue_deallocate_stack:
5080 case CODE_FOR_prologue_allocate_stack:
5081 pat = XVECEXP (pat, 0, 0);
5082 break;
5084 /* The pattern we use for br.cloop confuses the code above.
5085 The second element of the vector is representative. */
5086 case CODE_FOR_doloop_end_internal:
5087 pat = XVECEXP (pat, 0, 1);
5088 break;
5090 /* Doesn't generate code. */
5091 case CODE_FOR_pred_rel_mutex:
5092 case CODE_FOR_prologue_use:
5093 return 0;
5095 default:
5096 break;
5099 memset (rws_insn, 0, sizeof (rws_insn));
5100 need_barrier = rtx_needs_barrier (pat, flags, 0);
5102 /* Check to see if the previous instruction was a volatile
5103 asm. */
5104 if (! need_barrier)
5105 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5106 break;
5108 default:
5109 abort ();
5112 if (first_instruction && INSN_P (insn)
5113 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5114 && GET_CODE (PATTERN (insn)) != USE
5115 && GET_CODE (PATTERN (insn)) != CLOBBER)
5117 need_barrier = 0;
5118 first_instruction = 0;
5121 return need_barrier;
5124 /* Like group_barrier_needed_p, but do not clobber the current state. */
5126 static int
5127 safe_group_barrier_needed_p (rtx insn)
5129 struct reg_write_state rws_saved[NUM_REGS];
5130 int saved_first_instruction;
5131 int t;
5133 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5134 saved_first_instruction = first_instruction;
5136 t = group_barrier_needed_p (insn);
5138 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5139 first_instruction = saved_first_instruction;
5141 return t;
5144 /* Scan the current function and insert stop bits as necessary to
5145 eliminate dependencies. This function assumes that a final
5146 instruction scheduling pass has been run which has already
5147 inserted most of the necessary stop bits. This function only
5148 inserts new ones at basic block boundaries, since these are
5149 invisible to the scheduler. */
5151 static void
5152 emit_insn_group_barriers (FILE *dump)
5154 rtx insn;
5155 rtx last_label = 0;
5156 int insns_since_last_label = 0;
5158 init_insn_group_barriers ();
5160 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5162 if (GET_CODE (insn) == CODE_LABEL)
5164 if (insns_since_last_label)
5165 last_label = insn;
5166 insns_since_last_label = 0;
5168 else if (GET_CODE (insn) == NOTE
5169 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5171 if (insns_since_last_label)
5172 last_label = insn;
5173 insns_since_last_label = 0;
5175 else if (GET_CODE (insn) == INSN
5176 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5177 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5179 init_insn_group_barriers ();
5180 last_label = 0;
5182 else if (INSN_P (insn))
5184 insns_since_last_label = 1;
5186 if (group_barrier_needed_p (insn))
5188 if (last_label)
5190 if (dump)
5191 fprintf (dump, "Emitting stop before label %d\n",
5192 INSN_UID (last_label));
5193 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5194 insn = last_label;
5196 init_insn_group_barriers ();
5197 last_label = 0;
5204 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5205 This function has to emit all necessary group barriers. */
5207 static void
5208 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5210 rtx insn;
5212 init_insn_group_barriers ();
5214 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5216 if (GET_CODE (insn) == BARRIER)
5218 rtx last = prev_active_insn (insn);
5220 if (! last)
5221 continue;
5222 if (GET_CODE (last) == JUMP_INSN
5223 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5224 last = prev_active_insn (last);
5225 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5226 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5228 init_insn_group_barriers ();
5230 else if (INSN_P (insn))
5232 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5233 init_insn_group_barriers ();
5234 else if (group_barrier_needed_p (insn))
5236 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5237 init_insn_group_barriers ();
5238 group_barrier_needed_p (insn);
5245 static int errata_find_address_regs (rtx *, void *);
5246 static void errata_emit_nops (rtx);
5247 static void fixup_errata (void);
5249 /* This structure is used to track some details about the previous insn
5250 groups so we can determine if it may be necessary to insert NOPs to
5251 work around hardware errata. */
5252 static struct group
5254 HARD_REG_SET p_reg_set;
5255 HARD_REG_SET gr_reg_conditionally_set;
5256 } last_group[2];
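/* The situation guarded against (see errata_emit_nops below) is, roughly: an
   FP instruction sets a predicate in one group, an M- or A-unit insn
   conditionally writes a general register under that predicate, and a later
   insn uses that register as a memory address.  When the last step is
   detected, a stop/nop/stop sequence is emitted before the offending insn.  */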
5258 /* Index into the last_group array. */
5259 static int group_idx;
5261 /* Called through for_each_rtx; determines if a hard register that was
5262 conditionally set in the previous group is used as an address register.
5263 It ensures that for_each_rtx returns 1 in that case. */
5264 static int
5265 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5267 rtx x = *xp;
5268 if (GET_CODE (x) != MEM)
5269 return 0;
5270 x = XEXP (x, 0);
5271 if (GET_CODE (x) == POST_MODIFY)
5272 x = XEXP (x, 0);
5273 if (GET_CODE (x) == REG)
5275 struct group *prev_group = last_group + (group_idx ^ 1);
5276 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5277 REGNO (x)))
5278 return 1;
5279 return -1;
5281 return 0;
5284 /* Called for each insn; this function keeps track of the state in
5285 last_group and emits additional NOPs if necessary to work around
5286 an Itanium A/B step erratum. */
5287 static void
5288 errata_emit_nops (rtx insn)
5290 struct group *this_group = last_group + group_idx;
5291 struct group *prev_group = last_group + (group_idx ^ 1);
5292 rtx pat = PATTERN (insn);
5293 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5294 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5295 enum attr_type type;
5296 rtx set = real_pat;
5298 if (GET_CODE (real_pat) == USE
5299 || GET_CODE (real_pat) == CLOBBER
5300 || GET_CODE (real_pat) == ASM_INPUT
5301 || GET_CODE (real_pat) == ADDR_VEC
5302 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5303 || asm_noperands (PATTERN (insn)) >= 0)
5304 return;
5306 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5307 parts of it. */
5309 if (GET_CODE (set) == PARALLEL)
5311 int i;
5312 set = XVECEXP (real_pat, 0, 0);
5313 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5314 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5315 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5317 set = 0;
5318 break;
5322 if (set && GET_CODE (set) != SET)
5323 set = 0;
5325 type = get_attr_type (insn);
5327 if (type == TYPE_F
5328 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5329 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5331 if ((type == TYPE_M || type == TYPE_A) && cond && set
5332 && REG_P (SET_DEST (set))
5333 && GET_CODE (SET_SRC (set)) != PLUS
5334 && GET_CODE (SET_SRC (set)) != MINUS
5335 && (GET_CODE (SET_SRC (set)) != ASHIFT
5336 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5337 && (GET_CODE (SET_SRC (set)) != MEM
5338 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5339 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5341 if (!COMPARISON_P (cond)
5342 || !REG_P (XEXP (cond, 0)))
5343 abort ();
5345 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5346 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5348 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5350 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5351 emit_insn_before (gen_nop (), insn);
5352 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5353 group_idx = 0;
5354 memset (last_group, 0, sizeof last_group);
5358 /* Emit extra nops if they are required to work around hardware errata. */
5360 static void
5361 fixup_errata (void)
5363 rtx insn;
5365 if (! TARGET_B_STEP)
5366 return;
5368 group_idx = 0;
5369 memset (last_group, 0, sizeof last_group);
5371 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5373 if (!INSN_P (insn))
5374 continue;
5376 if (ia64_safe_type (insn) == TYPE_S)
5378 group_idx ^= 1;
5379 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5381 else
5382 errata_emit_nops (insn);
5387 /* Instruction scheduling support. */
5389 #define NR_BUNDLES 10
5391 /* A list of names of all available bundles. */
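/* Each name spells out the bundle's three dispersal slots: m = memory,
   i = integer, f = floating-point, b = branch, and lx = a long-immediate
   operation occupying the final two slots.  */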
5393 static const char *bundle_name [NR_BUNDLES] =
5395 ".mii",
5396 ".mmi",
5397 ".mfi",
5398 ".mmf",
5399 #if NR_BUNDLES == 10
5400 ".bbb",
5401 ".mbb",
5402 #endif
5403 ".mib",
5404 ".mmb",
5405 ".mfb",
5406 ".mlx"
5409 /* Nonzero if we should insert stop bits into the schedule. */
5411 int ia64_final_schedule = 0;
5413 /* Codes of the corresponding queried units: */
5415 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5416 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5418 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5419 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5421 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5423 /* The following variable value is an insn group barrier. */
5425 static rtx dfa_stop_insn;
5427 /* The following variable value is the last issued insn. */
5429 static rtx last_scheduled_insn;
5431 /* The following variable value is the size of the DFA state. */
5433 static size_t dfa_state_size;
5435 /* The following variable value is a pointer to a DFA state used as a
5436 temporary variable. */
5438 static state_t temp_dfa_state = NULL;
5440 /* The following variable value is DFA state after issuing the last
5441 insn. */
5443 static state_t prev_cycle_state = NULL;
5445 /* The following array element values are TRUE if the corresponding
5446 insn requires stop bits to be added before it. */
5448 static char *stops_p;
5450 /* The following variable is used to set up the above-mentioned array. */
5452 static int stop_before_p = 0;
5454 /* The following variable value is the length of the arrays `clocks' and
5455 `add_cycles'. */
5457 static int clocks_length;
5459 /* The following array element values are the cycles on which the
5460 corresponding insn will be issued. The array is used only for
5461 Itanium1. */
5463 static int *clocks;
5465 /* The following array element values are the numbers of cycles that should
5466 be added to improve insn scheduling for MM_insns for Itanium1. */
5468 static int *add_cycles;
5470 static rtx ia64_single_set (rtx);
5471 static void ia64_emit_insn_before (rtx, rtx);
5473 /* Map a bundle number to its pseudo-op. */
5475 const char *
5476 get_bundle_name (int b)
5478 return bundle_name[b];
5482 /* Return the maximum number of instructions a cpu can issue. */
5484 static int
5485 ia64_issue_rate (void)
5487 return 6;
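/* The machine can issue up to two bundles of three instruction slots each
   per clock, hence the rate of six.  */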
5490 /* Helper function - like single_set, but look inside COND_EXEC. */
5492 static rtx
5493 ia64_single_set (rtx insn)
5495 rtx x = PATTERN (insn), ret;
5496 if (GET_CODE (x) == COND_EXEC)
5497 x = COND_EXEC_CODE (x);
5498 if (GET_CODE (x) == SET)
5499 return x;
5501 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
5502 Although they are not classical single sets, the second set is there just
5503 to protect the first from moving past FP-relative stack accesses. */
5504 switch (recog_memoized (insn))
5506 case CODE_FOR_prologue_allocate_stack:
5507 case CODE_FOR_epilogue_deallocate_stack:
5508 ret = XVECEXP (x, 0, 0);
5509 break;
5511 default:
5512 ret = single_set_2 (insn, x);
5513 break;
5516 return ret;
5519 /* Adjust the cost of a scheduling dependency. Return the new cost of
5520 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5522 static int
5523 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5525 enum attr_itanium_class dep_class;
5526 enum attr_itanium_class insn_class;
5528 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5529 return cost;
5531 insn_class = ia64_safe_itanium_class (insn);
5532 dep_class = ia64_safe_itanium_class (dep_insn);
5533 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5534 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5535 return 0;
5537 return cost;
5540 /* Like emit_insn_before, but skip cycle_display notes.
5541 ??? When cycle display notes are implemented, update this. */
5543 static void
5544 ia64_emit_insn_before (rtx insn, rtx before)
5546 emit_insn_before (insn, before);
5549 /* The following function marks insns that produce addresses for load
5550 and store insns. Such insns will be placed into M slots because that
5551 decreases latency for Itanium1 (see function
5552 `ia64_produce_address_p' and the DFA descriptions). */
5554 static void
5555 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5557 rtx insn, link, next, next_tail;
5559 next_tail = NEXT_INSN (tail);
5560 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5561 if (INSN_P (insn))
5562 insn->call = 0;
5563 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5564 if (INSN_P (insn)
5565 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5567 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5569 next = XEXP (link, 0);
5570 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5571 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5572 && ia64_st_address_bypass_p (insn, next))
5573 break;
5574 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5575 || ia64_safe_itanium_class (next)
5576 == ITANIUM_CLASS_FLD)
5577 && ia64_ld_address_bypass_p (insn, next))
5578 break;
5580 insn->call = link != 0;
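/* Note that the `call' bit of each insn is reused above as a scratch flag:
   it is cleared for every insn in the region and then set on IALU insns
   whose result feeds the address of a load or store, so that such insns can
   be steered into M slots (see `ia64_produce_address_p').  */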
5584 /* We're beginning a new block. Initialize data structures as necessary. */
5586 static void
5587 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5588 int sched_verbose ATTRIBUTE_UNUSED,
5589 int max_ready ATTRIBUTE_UNUSED)
5591 #ifdef ENABLE_CHECKING
5592 rtx insn;
5594 if (reload_completed)
5595 for (insn = NEXT_INSN (current_sched_info->prev_head);
5596 insn != current_sched_info->next_tail;
5597 insn = NEXT_INSN (insn))
5598 if (SCHED_GROUP_P (insn))
5599 abort ();
5600 #endif
5601 last_scheduled_insn = NULL_RTX;
5602 init_insn_group_barriers ();
5605 /* We are about to begin issuing insns for this clock cycle.
5606 Override the default sort algorithm to better slot instructions. */
5608 static int
5609 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5610 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5611 int reorder_type)
5613 int n_asms;
5614 int n_ready = *pn_ready;
5615 rtx *e_ready = ready + n_ready;
5616 rtx *insnp;
5618 if (sched_verbose)
5619 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5621 if (reorder_type == 0)
5623 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5624 n_asms = 0;
5625 for (insnp = ready; insnp < e_ready; insnp++)
5626 if (insnp < e_ready)
5628 rtx insn = *insnp;
5629 enum attr_type t = ia64_safe_type (insn);
5630 if (t == TYPE_UNKNOWN)
5632 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5633 || asm_noperands (PATTERN (insn)) >= 0)
5635 rtx lowest = ready[n_asms];
5636 ready[n_asms] = insn;
5637 *insnp = lowest;
5638 n_asms++;
5640 else
5642 rtx highest = ready[n_ready - 1];
5643 ready[n_ready - 1] = insn;
5644 *insnp = highest;
5645 return 1;
5650 if (n_asms < n_ready)
5652 /* Some normal insns to process. Skip the asms. */
5653 ready += n_asms;
5654 n_ready -= n_asms;
5656 else if (n_ready > 0)
5657 return 1;
5660 if (ia64_final_schedule)
5662 int deleted = 0;
5663 int nr_need_stop = 0;
5665 for (insnp = ready; insnp < e_ready; insnp++)
5666 if (safe_group_barrier_needed_p (*insnp))
5667 nr_need_stop++;
5669 if (reorder_type == 1 && n_ready == nr_need_stop)
5670 return 0;
5671 if (reorder_type == 0)
5672 return 1;
5673 insnp = e_ready;
5674 /* Move down everything that needs a stop bit, preserving
5675 relative order. */
5676 while (insnp-- > ready + deleted)
5677 while (insnp >= ready + deleted)
5679 rtx insn = *insnp;
5680 if (! safe_group_barrier_needed_p (insn))
5681 break;
5682 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5683 *ready = insn;
5684 deleted++;
5686 n_ready -= deleted;
5687 ready += deleted;
5690 return 1;
5693 /* We are about to begin issuing insns for this clock cycle. Override
5694 the default sort algorithm to better slot instructions. */
5696 static int
5697 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
5698 int clock_var)
5700 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
5701 pn_ready, clock_var, 0);
5704 /* Like ia64_sched_reorder, but called after issuing each insn.
5705 Override the default sort algorithm to better slot instructions. */
5707 static int
5708 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
5709 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
5710 int *pn_ready, int clock_var)
5712 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
5713 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
5714 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
5715 clock_var, 1);
5718 /* We are about to issue INSN. Return the number of insns left on the
5719 ready queue that can be issued this cycle. */
5721 static int
5722 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
5723 int sched_verbose ATTRIBUTE_UNUSED,
5724 rtx insn ATTRIBUTE_UNUSED,
5725 int can_issue_more ATTRIBUTE_UNUSED)
5727 last_scheduled_insn = insn;
5728 memcpy (prev_cycle_state, curr_state, dfa_state_size);
5729 if (reload_completed)
5731 if (group_barrier_needed_p (insn))
5732 abort ();
5733 if (GET_CODE (insn) == CALL_INSN)
5734 init_insn_group_barriers ();
5735 stops_p [INSN_UID (insn)] = stop_before_p;
5736 stop_before_p = 0;
5738 return 1;
5741 /* We are choosing an insn from the ready queue. Return nonzero if INSN
5742 can be chosen. */
5744 static int
5745 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
5747 if (insn == NULL_RTX || !INSN_P (insn))
5748 abort ();
5749 return (!reload_completed
5750 || !safe_group_barrier_needed_p (insn));
5753 /* The following variable holds a pseudo-insn used by the DFA insn
5754 scheduler to change the DFA state when the simulated clock is
5755 increased. */
5757 static rtx dfa_pre_cycle_insn;
5759 /* We are about to begin issuing INSN. Return nonzero if we cannot
5760 issue it on the given cycle CLOCK, and zero if we should not sort
5761 the ready queue on the next clock start. */
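/* In outline: a stop bit is requested (stop_before_p) when the insn needs
   a group barrier after reload or when the previously scheduled insn was a
   call or an asm; otherwise, after reload on Itanium1, the clocks/add_cycles
   arrays are updated so that insns depending on MMMUL/MMSHF results can be
   given extra cycles later by `bundling'.  */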
5763 static int
5764 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
5765 int clock, int *sort_p)
5767 int setup_clocks_p = FALSE;
5769 if (insn == NULL_RTX || !INSN_P (insn))
5770 abort ();
5771 if ((reload_completed && safe_group_barrier_needed_p (insn))
5772 || (last_scheduled_insn
5773 && (GET_CODE (last_scheduled_insn) == CALL_INSN
5774 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5775 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
5777 init_insn_group_barriers ();
5778 if (verbose && dump)
5779 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
5780 last_clock == clock ? " + cycle advance" : "");
5781 stop_before_p = 1;
5782 if (last_clock == clock)
5784 state_transition (curr_state, dfa_stop_insn);
5785 if (TARGET_EARLY_STOP_BITS)
5786 *sort_p = (last_scheduled_insn == NULL_RTX
5787 || GET_CODE (last_scheduled_insn) != CALL_INSN);
5788 else
5789 *sort_p = 0;
5790 return 1;
5792 else if (reload_completed)
5793 setup_clocks_p = TRUE;
5794 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5795 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
5796 state_reset (curr_state);
5797 else
5799 memcpy (curr_state, prev_cycle_state, dfa_state_size);
5800 state_transition (curr_state, dfa_stop_insn);
5801 state_transition (curr_state, dfa_pre_cycle_insn);
5802 state_transition (curr_state, NULL);
5805 else if (reload_completed)
5806 setup_clocks_p = TRUE;
5807 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
5808 && GET_CODE (PATTERN (insn)) != ASM_INPUT
5809 && asm_noperands (PATTERN (insn)) < 0)
5811 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
5813 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
5815 rtx link;
5816 int d = -1;
5818 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
5819 if (REG_NOTE_KIND (link) == 0)
5821 enum attr_itanium_class dep_class;
5822 rtx dep_insn = XEXP (link, 0);
5824 dep_class = ia64_safe_itanium_class (dep_insn);
5825 if ((dep_class == ITANIUM_CLASS_MMMUL
5826 || dep_class == ITANIUM_CLASS_MMSHF)
5827 && last_clock - clocks [INSN_UID (dep_insn)] < 4
5828 && (d < 0
5829 || last_clock - clocks [INSN_UID (dep_insn)] < d))
5830 d = last_clock - clocks [INSN_UID (dep_insn)];
5832 if (d >= 0)
5833 add_cycles [INSN_UID (insn)] = 3 - d;
5836 return 0;
5841 /* The following page contains abstract data `bundle states' which are
5842 used for bundling insns (inserting nops and template generation). */
5844 /* The following describes the state of insn bundling. */
5846 struct bundle_state
5848 /* Unique bundle state number to identify them in the debugging
5849 output */
5850 int unique_num;
5851 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
5852 /* number of nops before and after the insn */
5853 short before_nops_num, after_nops_num;
5854 int insn_num; /* insn number (0 - for the initial state, 1 - for the
5855 1st insn) */
5856 int cost; /* cost of the state in cycles */
5857 int accumulated_insns_num; /* number of all previous insns, including
5858 nops; an L-type insn counts as 2 insns */
5859 int branch_deviation; /* deviation of previous branches from 3rd slots */
5860 struct bundle_state *next; /* next state with the same insn_num */
5861 struct bundle_state *originator; /* originator (previous insn state) */
5862 /* All bundle states are in the following chain. */
5863 struct bundle_state *allocated_states_chain;
5864 /* The DFA State after issuing the insn and the nops. */
5865 state_t dfa_state;
5868 /* The following array maps an insn number to the corresponding bundle state. */
5870 static struct bundle_state **index_to_bundle_states;
5872 /* The unique number of the next bundle state. */
5874 static int bundle_states_num;
5876 /* All allocated bundle states are in the following chain. */
5878 static struct bundle_state *allocated_bundle_states_chain;
5880 /* All allocated but not used bundle states are in the following
5881 chain. */
5883 static struct bundle_state *free_bundle_state_chain;
5886 /* The following function returns a free bundle state. */
5888 static struct bundle_state *
5889 get_free_bundle_state (void)
5891 struct bundle_state *result;
5893 if (free_bundle_state_chain != NULL)
5895 result = free_bundle_state_chain;
5896 free_bundle_state_chain = result->next;
5898 else
5900 result = xmalloc (sizeof (struct bundle_state));
5901 result->dfa_state = xmalloc (dfa_state_size);
5902 result->allocated_states_chain = allocated_bundle_states_chain;
5903 allocated_bundle_states_chain = result;
5905 result->unique_num = bundle_states_num++;
5906 return result;
5910 /* The following function frees the given bundle state. */
5912 static void
5913 free_bundle_state (struct bundle_state *state)
5915 state->next = free_bundle_state_chain;
5916 free_bundle_state_chain = state;
5919 /* Start work with abstract data `bundle states'. */
5921 static void
5922 initiate_bundle_states (void)
5924 bundle_states_num = 0;
5925 free_bundle_state_chain = NULL;
5926 allocated_bundle_states_chain = NULL;
5929 /* Finish work with abstract data `bundle states'. */
5931 static void
5932 finish_bundle_states (void)
5934 struct bundle_state *curr_state, *next_state;
5936 for (curr_state = allocated_bundle_states_chain;
5937 curr_state != NULL;
5938 curr_state = next_state)
5940 next_state = curr_state->allocated_states_chain;
5941 free (curr_state->dfa_state);
5942 free (curr_state);
5946 /* Hash table of the bundle states. The key is dfa_state and insn_num
5947 of the bundle states. */
5949 static htab_t bundle_state_table;
5951 /* The function returns the hash of BUNDLE_STATE. */
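/* The hash mixes every byte of the DFA state, each shifted by an amount
   derived from its position in the state buffer, and then adds the insn
   number, matching the equality test in `bundle_state_eq_p' below.  */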
5953 static unsigned
5954 bundle_state_hash (const void *bundle_state)
5956 const struct bundle_state *state = (struct bundle_state *) bundle_state;
5957 unsigned result, i;
5959 for (result = i = 0; i < dfa_state_size; i++)
5960 result += (((unsigned char *) state->dfa_state) [i]
5961 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
5962 return result + state->insn_num;
5965 /* The function returns nonzero if the bundle state keys are equal. */
5967 static int
5968 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
5970 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
5971 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
5973 return (state1->insn_num == state2->insn_num
5974 && memcmp (state1->dfa_state, state2->dfa_state,
5975 dfa_state_size) == 0);
5978 /* The function inserts BUNDLE_STATE into the hash table. It
5979 returns nonzero if the bundle state has been inserted into the
5980 table. The table contains the best bundle state with a given key. */
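/* States with the same key are compared lexicographically: lower cost
   first, then fewer accumulated insns (i.e. fewer nops), then smaller
   branch deviation.  When the new state wins, its contents are swapped
   with the existing table entry, so the caller can still free the state
   it passed in.  */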
5982 static int
5983 insert_bundle_state (struct bundle_state *bundle_state)
5985 void **entry_ptr;
5987 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
5988 if (*entry_ptr == NULL)
5990 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
5991 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
5992 *entry_ptr = (void *) bundle_state;
5993 return TRUE;
5995 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
5996 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
5997 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
5998 > bundle_state->accumulated_insns_num
5999 || (((struct bundle_state *)
6000 *entry_ptr)->accumulated_insns_num
6001 == bundle_state->accumulated_insns_num
6002 && ((struct bundle_state *)
6003 *entry_ptr)->branch_deviation
6004 > bundle_state->branch_deviation))))
6007 struct bundle_state temp;
6009 temp = *(struct bundle_state *) *entry_ptr;
6010 *(struct bundle_state *) *entry_ptr = *bundle_state;
6011 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6012 *bundle_state = temp;
6014 return FALSE;
6017 /* Start work with the hash table. */
6019 static void
6020 initiate_bundle_state_table (void)
6022 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6023 (htab_del) 0);
6026 /* Finish work with the hash table. */
6028 static void
6029 finish_bundle_state_table (void)
6031 htab_delete (bundle_state_table);
6036 /* The following variable is an insn `nop' used to check bundle states
6037 with different numbers of inserted nops. */
6039 static rtx ia64_nop;
6041 /* The following function tries to issue NOPS_NUM nops for the current
6042 state without advancing the processor cycle. If this fails, the
6043 function returns FALSE and frees the current state. */
6045 static int
6046 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6048 int i;
6050 for (i = 0; i < nops_num; i++)
6051 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6053 free_bundle_state (curr_state);
6054 return FALSE;
6056 return TRUE;
6059 /* The following function tries to issue INSN for the current
6060 state without advancing the processor cycle. If this fails, the
6061 function returns FALSE and frees the current state. */
6063 static int
6064 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6066 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6068 free_bundle_state (curr_state);
6069 return FALSE;
6071 return TRUE;
6074 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6075 starting with ORIGINATOR without advancing the processor cycle. If
6076 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6077 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6078 If successful, the function creates a new bundle state and inserts
6079 it into the hash table and into `index_to_bundle_states'. */
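/* Three cases are handled below: an insn_group_barrier, an ordinary insn
   issued within the current simulated cycle (GET_MODE != TImode), and an
   insn starting a new cycle (TImode), for which the DFA state is first
   advanced across the cycle boundary and the state's cost is incremented.  */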
6081 static void
6082 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6083 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6085 struct bundle_state *curr_state;
6087 curr_state = get_free_bundle_state ();
6088 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6089 curr_state->insn = insn;
6090 curr_state->insn_num = originator->insn_num + 1;
6091 curr_state->cost = originator->cost;
6092 curr_state->originator = originator;
6093 curr_state->before_nops_num = before_nops_num;
6094 curr_state->after_nops_num = 0;
6095 curr_state->accumulated_insns_num
6096 = originator->accumulated_insns_num + before_nops_num;
6097 curr_state->branch_deviation = originator->branch_deviation;
6098 if (insn == NULL_RTX)
6099 abort ();
6100 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6102 if (GET_MODE (insn) == TImode)
6103 abort ();
6104 if (!try_issue_nops (curr_state, before_nops_num))
6105 return;
6106 if (!try_issue_insn (curr_state, insn))
6107 return;
6108 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6109 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6110 && curr_state->accumulated_insns_num % 3 != 0)
6112 free_bundle_state (curr_state);
6113 return;
6116 else if (GET_MODE (insn) != TImode)
6118 if (!try_issue_nops (curr_state, before_nops_num))
6119 return;
6120 if (!try_issue_insn (curr_state, insn))
6121 return;
6122 curr_state->accumulated_insns_num++;
6123 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6124 || asm_noperands (PATTERN (insn)) >= 0)
6125 abort ();
6126 if (ia64_safe_type (insn) == TYPE_L)
6127 curr_state->accumulated_insns_num++;
6129 else
6131 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6132 state_transition (curr_state->dfa_state, NULL);
6133 curr_state->cost++;
6134 if (!try_issue_nops (curr_state, before_nops_num))
6135 return;
6136 if (!try_issue_insn (curr_state, insn))
6137 return;
6138 curr_state->accumulated_insns_num++;
6139 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6140 || asm_noperands (PATTERN (insn)) >= 0)
6142 /* Finish bundle containing asm insn. */
6143 curr_state->after_nops_num
6144 = 3 - curr_state->accumulated_insns_num % 3;
6145 curr_state->accumulated_insns_num
6146 += 3 - curr_state->accumulated_insns_num % 3;
6148 else if (ia64_safe_type (insn) == TYPE_L)
6149 curr_state->accumulated_insns_num++;
6151 if (ia64_safe_type (insn) == TYPE_B)
6152 curr_state->branch_deviation
6153 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6154 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6156 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6158 state_t dfa_state;
6159 struct bundle_state *curr_state1;
6160 struct bundle_state *allocated_states_chain;
6162 curr_state1 = get_free_bundle_state ();
6163 dfa_state = curr_state1->dfa_state;
6164 allocated_states_chain = curr_state1->allocated_states_chain;
6165 *curr_state1 = *curr_state;
6166 curr_state1->dfa_state = dfa_state;
6167 curr_state1->allocated_states_chain = allocated_states_chain;
6168 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6169 dfa_state_size);
6170 curr_state = curr_state1;
6172 if (!try_issue_nops (curr_state,
6173 3 - curr_state->accumulated_insns_num % 3))
6174 return;
6175 curr_state->after_nops_num
6176 = 3 - curr_state->accumulated_insns_num % 3;
6177 curr_state->accumulated_insns_num
6178 += 3 - curr_state->accumulated_insns_num % 3;
6180 if (!insert_bundle_state (curr_state))
6181 free_bundle_state (curr_state);
6182 return;
6185 /* The following function returns the position in the two-bundle
6186 window for the given STATE. */
6188 static int
6189 get_max_pos (state_t state)
6191 if (cpu_unit_reservation_p (state, pos_6))
6192 return 6;
6193 else if (cpu_unit_reservation_p (state, pos_5))
6194 return 5;
6195 else if (cpu_unit_reservation_p (state, pos_4))
6196 return 4;
6197 else if (cpu_unit_reservation_p (state, pos_3))
6198 return 3;
6199 else if (cpu_unit_reservation_p (state, pos_2))
6200 return 2;
6201 else if (cpu_unit_reservation_p (state, pos_1))
6202 return 1;
6203 else
6204 return 0;
6207 /* The function returns the code of a possible template for the given
6208 position and state. It should be called only with the two position
6209 values 3 and 6. */
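/* Judging from the CPU unit names tested below, the returned codes stand
   for the IA-64 bundle templates in this order: 0 = MII, 1 = MMI, 2 = MFI,
   3 = MMF, 4 = BBB, 5 = MBB, 6 = MIB, 7 = MMB, 8 = MFB, 9 = MLX.  */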
6211 static int
6212 get_template (state_t state, int pos)
6214 switch (pos)
6216 case 3:
6217 if (cpu_unit_reservation_p (state, _0mii_))
6218 return 0;
6219 else if (cpu_unit_reservation_p (state, _0mmi_))
6220 return 1;
6221 else if (cpu_unit_reservation_p (state, _0mfi_))
6222 return 2;
6223 else if (cpu_unit_reservation_p (state, _0mmf_))
6224 return 3;
6225 else if (cpu_unit_reservation_p (state, _0bbb_))
6226 return 4;
6227 else if (cpu_unit_reservation_p (state, _0mbb_))
6228 return 5;
6229 else if (cpu_unit_reservation_p (state, _0mib_))
6230 return 6;
6231 else if (cpu_unit_reservation_p (state, _0mmb_))
6232 return 7;
6233 else if (cpu_unit_reservation_p (state, _0mfb_))
6234 return 8;
6235 else if (cpu_unit_reservation_p (state, _0mlx_))
6236 return 9;
6237 else
6238 abort ();
6239 case 6:
6240 if (cpu_unit_reservation_p (state, _1mii_))
6241 return 0;
6242 else if (cpu_unit_reservation_p (state, _1mmi_))
6243 return 1;
6244 else if (cpu_unit_reservation_p (state, _1mfi_))
6245 return 2;
6246 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6247 return 3;
6248 else if (cpu_unit_reservation_p (state, _1bbb_))
6249 return 4;
6250 else if (cpu_unit_reservation_p (state, _1mbb_))
6251 return 5;
6252 else if (cpu_unit_reservation_p (state, _1mib_))
6253 return 6;
6254 else if (cpu_unit_reservation_p (state, _1mmb_))
6255 return 7;
6256 else if (cpu_unit_reservation_p (state, _1mfb_))
6257 return 8;
6258 else if (cpu_unit_reservation_p (state, _1mlx_))
6259 return 9;
6260 else
6261 abort ();
6262 default:
6263 abort ();
6267 /* The following function returns the first insn important for insn
6268 bundling, starting at INSN and stopping before TAIL. */
6270 static rtx
6271 get_next_important_insn (rtx insn, rtx tail)
6273 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6274 if (INSN_P (insn)
6275 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6276 && GET_CODE (PATTERN (insn)) != USE
6277 && GET_CODE (PATTERN (insn)) != CLOBBER)
6278 return insn;
6279 return NULL_RTX;
6282 /* The following function does insn bundling. Bundling means
6283 inserting templates and nop insns to fit insn groups into permitted
6284 templates. Instruction scheduling uses an NDFA (non-deterministic
6285 finite automaton) encoding information about the templates and the
6286 inserted nops. The nondeterminism of the automaton permits following
6287 all possible insn sequences very quickly.
6289 Unfortunately it is not possible to get information about the inserted
6290 nop insns and the templates used from the automaton states. The
6291 automaton only says that we can issue an insn, possibly inserting
6292 some nops before it and using some template. Therefore insn
6293 bundling in this function is implemented using a DFA
6294 (deterministic finite automaton). We follow all possible insn
6295 sequences by inserting 0-2 nops (which is what the NDFA describes for
6296 insn scheduling) before/after each insn being bundled. We know the
6297 start of a simulated processor cycle from insn scheduling (an insn
6298 starting a new cycle has TImode).
6300 A simple implementation of insn bundling would create an enormous
6301 number of possible insn sequences satisfying the information about new
6302 cycle ticks taken from the insn scheduling. To make the algorithm
6303 practical we use dynamic programming. Each decision (about
6304 inserting nops and implicitly about previous decisions) is described
6305 by structure bundle_state (see above). If we generate the same
6306 bundle state (the key is the automaton state after issuing the insns
6307 and nops for it), we reuse the already generated one. As a consequence
6308 we reject some decisions which cannot improve the solution and
6309 reduce the memory used by the algorithm.
6311 When we reach the end of the EBB (extended basic block), we choose
6312 the best sequence and then, moving back through the EBB, insert
6313 templates for the best alternative. The templates are obtained by
6314 querying the automaton state for each insn in the chosen bundle states.
6316 So the algorithm makes two (forward and backward) passes through the
6317 EBB. There is an additional forward pass through the EBB for the
6318 Itanium1 processor. This pass inserts more nops to make the dependency
6319 between a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
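/* Illustrative sketch of the forward pass (not literal code): for each
   important insn I and each bundle state S recorded for the previous insn,
   the pass calls issue_nops_and_insn (S, n, I, ...) for n = 2, 1 and 0
   nops before I, optionally also closing the current bundle; identical
   resulting states are merged by insert_bundle_state, keeping the cheaper
   one.  The backward pass then walks the `originator' chain of the best
   final state, emitting the recorded nops and bundle_selector templates.  */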
6321 static void
6322 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6324 struct bundle_state *curr_state, *next_state, *best_state;
6325 rtx insn, next_insn;
6326 int insn_num;
6327 int i, bundle_end_p, only_bundle_end_p, asm_p;
6328 int pos = 0, max_pos, template0, template1;
6329 rtx b;
6330 rtx nop;
6331 enum attr_type type;
6333 insn_num = 0;
6334 /* Count insns in the EBB. */
6335 for (insn = NEXT_INSN (prev_head_insn);
6336 insn && insn != tail;
6337 insn = NEXT_INSN (insn))
6338 if (INSN_P (insn))
6339 insn_num++;
6340 if (insn_num == 0)
6341 return;
6342 bundling_p = 1;
6343 dfa_clean_insn_cache ();
6344 initiate_bundle_state_table ();
6345 index_to_bundle_states = xmalloc ((insn_num + 2)
6346 * sizeof (struct bundle_state *));
6347 /* First (forward) pass -- generation of bundle states. */
6348 curr_state = get_free_bundle_state ();
6349 curr_state->insn = NULL;
6350 curr_state->before_nops_num = 0;
6351 curr_state->after_nops_num = 0;
6352 curr_state->insn_num = 0;
6353 curr_state->cost = 0;
6354 curr_state->accumulated_insns_num = 0;
6355 curr_state->branch_deviation = 0;
6356 curr_state->next = NULL;
6357 curr_state->originator = NULL;
6358 state_reset (curr_state->dfa_state);
6359 index_to_bundle_states [0] = curr_state;
6360 insn_num = 0;
6361 /* Shift the cycle mark if it is put on an insn which could be ignored. */
6362 for (insn = NEXT_INSN (prev_head_insn);
6363 insn != tail;
6364 insn = NEXT_INSN (insn))
6365 if (INSN_P (insn)
6366 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6367 || GET_CODE (PATTERN (insn)) == USE
6368 || GET_CODE (PATTERN (insn)) == CLOBBER)
6369 && GET_MODE (insn) == TImode)
6371 PUT_MODE (insn, VOIDmode);
6372 for (next_insn = NEXT_INSN (insn);
6373 next_insn != tail;
6374 next_insn = NEXT_INSN (next_insn))
6375 if (INSN_P (next_insn)
6376 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6377 && GET_CODE (PATTERN (next_insn)) != USE
6378 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6380 PUT_MODE (next_insn, TImode);
6381 break;
6384 /* Forward pass: generation of bundle states. */
6385 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6386 insn != NULL_RTX;
6387 insn = next_insn)
6389 if (!INSN_P (insn)
6390 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6391 || GET_CODE (PATTERN (insn)) == USE
6392 || GET_CODE (PATTERN (insn)) == CLOBBER)
6393 abort ();
6394 type = ia64_safe_type (insn);
6395 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6396 insn_num++;
6397 index_to_bundle_states [insn_num] = NULL;
6398 for (curr_state = index_to_bundle_states [insn_num - 1];
6399 curr_state != NULL;
6400 curr_state = next_state)
6402 pos = curr_state->accumulated_insns_num % 3;
6403 next_state = curr_state->next;
6404 /* We must fill up the current bundle in order to start a
6405 subsequent asm insn in a new bundle. An asm insn is always
6406 placed in a separate bundle. */
6407 only_bundle_end_p
6408 = (next_insn != NULL_RTX
6409 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6410 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6411 /* We may fill up the current bundle if it is the cycle end
6412 without a group barrier. */
6413 bundle_end_p
6414 = (only_bundle_end_p || next_insn == NULL_RTX
6415 || (GET_MODE (next_insn) == TImode
6416 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6417 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6418 || type == TYPE_S
6419 /* We need to insert 2 nops for cases like M_MII. To
6420 guarantee issuing all insns on the same cycle for
6421 Itanium 1, we need to issue 2 nops after the first M
6422 insn (MnnMII where n is a nop insn). */
6423 || ((type == TYPE_M || type == TYPE_A)
6424 && ia64_tune == PROCESSOR_ITANIUM
6425 && !bundle_end_p && pos == 1))
6426 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6427 only_bundle_end_p);
6428 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6429 only_bundle_end_p);
6430 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6431 only_bundle_end_p);
6433 if (index_to_bundle_states [insn_num] == NULL)
6434 abort ();
6435 for (curr_state = index_to_bundle_states [insn_num];
6436 curr_state != NULL;
6437 curr_state = curr_state->next)
6438 if (verbose >= 2 && dump)
6440 /* This structure is taken from the generated code of the
6441 pipeline hazard recognizer (see file insn-attrtab.c).
6442 Please don't forget to change the structure if a new
6443 automaton is added to the .md file. */
6444 struct DFA_chip
6446 unsigned short one_automaton_state;
6447 unsigned short oneb_automaton_state;
6448 unsigned short two_automaton_state;
6449 unsigned short twob_automaton_state;
6452 fprintf
6453 (dump,
6454 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6455 curr_state->unique_num,
6456 (curr_state->originator == NULL
6457 ? -1 : curr_state->originator->unique_num),
6458 curr_state->cost,
6459 curr_state->before_nops_num, curr_state->after_nops_num,
6460 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6461 (ia64_tune == PROCESSOR_ITANIUM
6462 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6463 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6464 INSN_UID (insn));
6467 if (index_to_bundle_states [insn_num] == NULL)
6468 /* We should find a solution because the 2nd insn scheduling has
6469 found one. */
6470 abort ();
6471 /* Find a state corresponding to the best insn sequence. */
6472 best_state = NULL;
6473 for (curr_state = index_to_bundle_states [insn_num];
6474 curr_state != NULL;
6475 curr_state = curr_state->next)
6476 /* We look only at states whose last bundle is completely filled.
6477 We prefer insn sequences with minimal cost first, then with the
6478 fewest inserted nops, and finally with branch insns placed in
6479 3rd slots. */
6480 if (curr_state->accumulated_insns_num % 3 == 0
6481 && (best_state == NULL || best_state->cost > curr_state->cost
6482 || (best_state->cost == curr_state->cost
6483 && (curr_state->accumulated_insns_num
6484 < best_state->accumulated_insns_num
6485 || (curr_state->accumulated_insns_num
6486 == best_state->accumulated_insns_num
6487 && curr_state->branch_deviation
6488 < best_state->branch_deviation)))))
6489 best_state = curr_state;
6490 /* Second (backward) pass: adding nops and templates. */
6491 insn_num = best_state->before_nops_num;
6492 template0 = template1 = -1;
6493 for (curr_state = best_state;
6494 curr_state->originator != NULL;
6495 curr_state = curr_state->originator)
6497 insn = curr_state->insn;
6498 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6499 || asm_noperands (PATTERN (insn)) >= 0);
6500 insn_num++;
6501 if (verbose >= 2 && dump)
6503 struct DFA_chip
6505 unsigned short one_automaton_state;
6506 unsigned short oneb_automaton_state;
6507 unsigned short two_automaton_state;
6508 unsigned short twob_automaton_state;
6511 fprintf
6512 (dump,
6513 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6514 curr_state->unique_num,
6515 (curr_state->originator == NULL
6516 ? -1 : curr_state->originator->unique_num),
6517 curr_state->cost,
6518 curr_state->before_nops_num, curr_state->after_nops_num,
6519 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6520 (ia64_tune == PROCESSOR_ITANIUM
6521 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6522 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6523 INSN_UID (insn));
6525 /* Find the position in the current bundle window. The window can
6526 contain at most two bundles. A two-bundle window means that
6527 the processor will make two bundle rotations. */
6528 max_pos = get_max_pos (curr_state->dfa_state);
6529 if (max_pos == 6
6530 /* The following (negative template number) means that the
6531 processor did one bundle rotation. */
6532 || (max_pos == 3 && template0 < 0))
6534 /* We are at the end of the window -- find template(s) for
6535 its bundle(s). */
6536 pos = max_pos;
6537 if (max_pos == 3)
6538 template0 = get_template (curr_state->dfa_state, 3);
6539 else
6541 template1 = get_template (curr_state->dfa_state, 3);
6542 template0 = get_template (curr_state->dfa_state, 6);
6545 if (max_pos > 3 && template1 < 0)
6546 /* This may happen when there is a stop inside a bundle. */
6548 if (pos > 3)
6549 abort ();
6550 template1 = get_template (curr_state->dfa_state, 3);
6551 pos += 3;
6553 if (!asm_p)
6554 /* Emit nops after the current insn. */
6555 for (i = 0; i < curr_state->after_nops_num; i++)
6557 nop = gen_nop ();
6558 emit_insn_after (nop, insn);
6559 pos--;
6560 if (pos < 0)
6561 abort ();
6562 if (pos % 3 == 0)
6564 /* We are at the start of a bundle: emit the template
6565 (it should be defined). */
6566 if (template0 < 0)
6567 abort ();
6568 b = gen_bundle_selector (GEN_INT (template0));
6569 ia64_emit_insn_before (b, nop);
6570 /* If we have a two-bundle window, we make one bundle
6571 rotation. Otherwise template0 will be undefined
6572 (a negative value). */
6573 template0 = template1;
6574 template1 = -1;
6577 /* Move the position backward in the window. A group barrier
6578 occupies no slot. An asm insn takes a whole bundle. */
6579 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6580 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6581 && asm_noperands (PATTERN (insn)) < 0)
6582 pos--;
6583 /* Long insn takes 2 slots. */
6584 if (ia64_safe_type (insn) == TYPE_L)
6585 pos--;
6586 if (pos < 0)
6587 abort ();
6588 if (pos % 3 == 0
6589 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6590 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6591 && asm_noperands (PATTERN (insn)) < 0)
6593 /* The current insn is at the bundle start: emit the
6594 template. */
6595 if (template0 < 0)
6596 abort ();
6597 b = gen_bundle_selector (GEN_INT (template0));
6598 ia64_emit_insn_before (b, insn);
6599 b = PREV_INSN (insn);
6600 insn = b;
6601 /* See comment above in analogous place for emitting nops
6602 after the insn. */
6603 template0 = template1;
6604 template1 = -1;
6606 /* Emit nops before the current insn. */
6607 for (i = 0; i < curr_state->before_nops_num; i++)
6609 nop = gen_nop ();
6610 ia64_emit_insn_before (nop, insn);
6611 nop = PREV_INSN (insn);
6612 insn = nop;
6613 pos--;
6614 if (pos < 0)
6615 abort ();
6616 if (pos % 3 == 0)
6618 /* See comment above in analogous place for emitting nops
6619 after the insn. */
6620 if (template0 < 0)
6621 abort ();
6622 b = gen_bundle_selector (GEN_INT (template0));
6623 ia64_emit_insn_before (b, insn);
6624 b = PREV_INSN (insn);
6625 insn = b;
6626 template0 = template1;
6627 template1 = -1;
6631 if (ia64_tune == PROCESSOR_ITANIUM)
6632 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
6633 Itanium1 has a peculiar design: if the distance between an insn
6634 and a dependent MM-insn is less than 4 cycles, we incur an
6635 additional 6-cycle stall. So we make the distance equal to 4
6636 cycles if it is less. */
6637 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6638 insn != NULL_RTX;
6639 insn = next_insn)
6641 if (!INSN_P (insn)
6642 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6643 || GET_CODE (PATTERN (insn)) == USE
6644 || GET_CODE (PATTERN (insn)) == CLOBBER)
6645 abort ();
6646 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6647 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
6648 /* We found an MM-insn which needs additional cycles. */
6650 rtx last;
6651 int i, j, n;
6652 int pred_stop_p;
6654 /* Now we search for the template of the bundle in
6655 which the MM-insn is placed and for the position of the
6656 insn in the bundle (0, 1, 2). We also check whether
6657 there is a stop before the insn. */
6658 last = prev_active_insn (insn);
6659 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6660 if (pred_stop_p)
6661 last = prev_active_insn (last);
6662 n = 0;
6663 for (;; last = prev_active_insn (last))
6664 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6666 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
6667 if (template0 == 9)
6668 /* The insn is in an MLX bundle. Change the template
6669 to MFI because we will add nops before the
6670 insn. This simplifies the subsequent code a lot. */
6671 PATTERN (last)
6672 = gen_bundle_selector (const2_rtx); /* -> MFI */
6673 break;
6675 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
6676 && (ia64_safe_itanium_class (last)
6677 != ITANIUM_CLASS_IGNORE))
6678 n++;
6679 /* Sanity checks: the stop is not at the bundle start,
6680 there are no more than 3 insns in the bundle, and the
6681 MM-insn is not at the start of a bundle with
6682 template MLX. */
6683 if ((pred_stop_p && n == 0) || n > 2
6684 || (template0 == 9 && n != 0))
6685 abort ();
6686 /* Fill the remaining slots of the original bundle with nops. */
6687 for (j = 3 - n; j > 0; j --)
6688 ia64_emit_insn_before (gen_nop (), insn);
6689 /* This takes into account that we will add N more nops
6690 before the insn later -- see the code below. */
6691 add_cycles [INSN_UID (insn)]--;
6692 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6693 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6694 insn);
6695 if (pred_stop_p)
6696 add_cycles [INSN_UID (insn)]--;
6697 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
6699 /* Insert "MII;" template. */
6700 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
6701 insn);
6702 ia64_emit_insn_before (gen_nop (), insn);
6703 ia64_emit_insn_before (gen_nop (), insn);
6704 if (i > 1)
6706 /* To decrease code size, we use the "MI;I;"
6707 template. */
6708 ia64_emit_insn_before
6709 (gen_insn_group_barrier (GEN_INT (3)), insn);
6710 i--;
6712 ia64_emit_insn_before (gen_nop (), insn);
6713 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6714 insn);
6716 /* Put the MM-insn in the same slot of a bundle with the
6717 same template as the original one. */
6718 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
6719 insn);
6720 /* To put the insn in the same slot, add necessary number
6721 of nops. */
6722 for (j = n; j > 0; j --)
6723 ia64_emit_insn_before (gen_nop (), insn);
6724 /* Put the stop if the original bundle had it. */
6725 if (pred_stop_p)
6726 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6727 insn);
6730 free (index_to_bundle_states);
6731 finish_bundle_state_table ();
6732 bundling_p = 0;
6733 dfa_clean_insn_cache ();
6736 /* The following function is called at the end of scheduling BB or
6737 EBB. After reload, it inserts stop bits and does insn bundling. */
6739 static void
6740 ia64_sched_finish (FILE *dump, int sched_verbose)
6742 if (sched_verbose)
6743 fprintf (dump, "// Finishing schedule.\n");
6744 if (!reload_completed)
6745 return;
6746 if (reload_completed)
6748 final_emit_insn_group_barriers (dump);
6749 bundling (dump, sched_verbose, current_sched_info->prev_head,
6750 current_sched_info->next_tail);
6751 if (sched_verbose && dump)
6752 fprintf (dump, "// finishing %d-%d\n",
6753 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
6754 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
6756 return;
6760 /* The following function inserts stop bits in the scheduled BB or EBB. */
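/* The walk below re-simulates group boundaries with
   `group_barrier_needed_p'.  With TARGET_EARLY_STOP_BITS the stop bit is
   moved back before the most recent insn that starts a cycle and was
   marked in `stops_p' during scheduling; otherwise the barrier is emitted
   directly before the insn that required it.  */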
6762 static void
6763 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6765 rtx insn;
6766 int need_barrier_p = 0;
6767 rtx prev_insn = NULL_RTX;
6769 init_insn_group_barriers ();
6771 for (insn = NEXT_INSN (current_sched_info->prev_head);
6772 insn != current_sched_info->next_tail;
6773 insn = NEXT_INSN (insn))
6775 if (GET_CODE (insn) == BARRIER)
6777 rtx last = prev_active_insn (insn);
6779 if (! last)
6780 continue;
6781 if (GET_CODE (last) == JUMP_INSN
6782 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6783 last = prev_active_insn (last);
6784 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6785 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6787 init_insn_group_barriers ();
6788 need_barrier_p = 0;
6789 prev_insn = NULL_RTX;
6791 else if (INSN_P (insn))
6793 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6795 init_insn_group_barriers ();
6796 need_barrier_p = 0;
6797 prev_insn = NULL_RTX;
6799 else if (need_barrier_p || group_barrier_needed_p (insn))
6801 if (TARGET_EARLY_STOP_BITS)
6803 rtx last;
6805 for (last = insn;
6806 last != current_sched_info->prev_head;
6807 last = PREV_INSN (last))
6808 if (INSN_P (last) && GET_MODE (last) == TImode
6809 && stops_p [INSN_UID (last)])
6810 break;
6811 if (last == current_sched_info->prev_head)
6812 last = insn;
6813 last = prev_active_insn (last);
6814 if (last
6815 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
6816 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
6817 last);
6818 init_insn_group_barriers ();
6819 for (last = NEXT_INSN (last);
6820 last != insn;
6821 last = NEXT_INSN (last))
6822 if (INSN_P (last))
6823 group_barrier_needed_p (last);
6825 else
6827 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6828 insn);
6829 init_insn_group_barriers ();
6831 group_barrier_needed_p (insn);
6832 prev_insn = NULL_RTX;
6834 else if (recog_memoized (insn) >= 0)
6835 prev_insn = insn;
6836 need_barrier_p = (GET_CODE (insn) == CALL_INSN
6837 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6838 || asm_noperands (PATTERN (insn)) >= 0);
6845 /* The following function returns the lookahead depth for the multipass
6846 DFA insn scheduler. */
6848 static int
6849 ia64_first_cycle_multipass_dfa_lookahead (void)
6851 return (reload_completed ? 6 : 4);
6854 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
6856 static void
6857 ia64_init_dfa_pre_cycle_insn (void)
6859 if (temp_dfa_state == NULL)
6861 dfa_state_size = state_size ();
6862 temp_dfa_state = xmalloc (dfa_state_size);
6863 prev_cycle_state = xmalloc (dfa_state_size);
6865 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
6866 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
6867 recog_memoized (dfa_pre_cycle_insn);
6868 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
6869 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
6870 recog_memoized (dfa_stop_insn);
6873 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
6874 used by the DFA insn scheduler. */
6876 static rtx
6877 ia64_dfa_pre_cycle_insn (void)
6879 return dfa_pre_cycle_insn;
6882 /* The following function returns TRUE if PRODUCER (of type ilog or
6883 ld) produces an address for CONSUMER (of type st or stf). */
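/* Illustrative (hypothetical) example: for a producer such as
   "add r14 = r2, r3" followed by a consumer "st8 [r14] = r4", the
   destination register r14 of the producer is mentioned in the store's
   address, so the function returns TRUE.  */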
6886 ia64_st_address_bypass_p (rtx producer, rtx consumer)
6888 rtx dest, reg, mem;
6890 if (producer == NULL_RTX || consumer == NULL_RTX)
6891 abort ();
6892 dest = ia64_single_set (producer);
6893 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6894 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6895 abort ();
6896 if (GET_CODE (reg) == SUBREG)
6897 reg = SUBREG_REG (reg);
6898 dest = ia64_single_set (consumer);
6899 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
6900 || GET_CODE (mem) != MEM)
6901 abort ();
6902 return reg_mentioned_p (reg, mem);
6905 /* The following function returns TRUE if PRODUCER (of type ilog or
6906 ld) produces an address for CONSUMER (of type ld or fld). */
6909 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
6911 rtx dest, src, reg, mem;
6913 if (producer == NULL_RTX || consumer == NULL_RTX)
6914 abort ();
6915 dest = ia64_single_set (producer);
6916 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6917 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6918 abort ();
6919 if (GET_CODE (reg) == SUBREG)
6920 reg = SUBREG_REG (reg);
6921 src = ia64_single_set (consumer);
6922 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
6923 abort ();
6924 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
6925 mem = XVECEXP (mem, 0, 0);
6926 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
6927 mem = XEXP (mem, 0);
6929 /* Note that LO_SUM is used for GOT loads. */
6930 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
6931 abort ();
6933 return reg_mentioned_p (reg, mem);
6936 /* The following function returns TRUE if INSN produces an address for a
6937 load/store insn. We place such insns into M slots because this
6938 decreases their latency. */
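/* This simply reads back the flag that `ia64_dependencies_evaluation_hook'
   stored in the insn's `call' bit; the DFA descriptions query it in order
   to prefer an M slot for such address producers.  */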
6941 ia64_produce_address_p (rtx insn)
6943 return insn->call;
6947 /* Emit pseudo-ops for the assembler to describe predicate relations.
6948 At present this assumes that we only consider predicate pairs to
6949 be mutex, and that the assembler can deduce proper values from
6950 straight-line code. */
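/* Concretely, the first loop below emits a pred_rel_mutex insn for each
   predicate pair that is live at the start of a block beginning with a
   code label, and the second loop brackets conditional noreturn calls
   with safe_across_calls annotations so that the assembler does not
   complain about predicates clobbered by a call it assumes returns.  */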
6952 static void
6953 emit_predicate_relation_info (void)
6955 basic_block bb;
6957 FOR_EACH_BB_REVERSE (bb)
6959 int r;
6960 rtx head = BB_HEAD (bb);
6962 /* We only need such notes at code labels. */
6963 if (GET_CODE (head) != CODE_LABEL)
6964 continue;
6965 if (GET_CODE (NEXT_INSN (head)) == NOTE
6966 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
6967 head = NEXT_INSN (head);
6969 for (r = PR_REG (0); r < PR_REG (64); r += 2)
6970 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
6972 rtx p = gen_rtx_REG (BImode, r);
6973 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
6974 if (head == BB_END (bb))
6975 BB_END (bb) = n;
6976 head = n;
6980 /* Look for conditional calls that do not return, and protect predicate
6981 relations around them. Otherwise the assembler will assume the call
6982 returns, and complain about uses of call-clobbered predicates after
6983 the call. */
6984 FOR_EACH_BB_REVERSE (bb)
6986 rtx insn = BB_HEAD (bb);
6988 while (1)
6990 if (GET_CODE (insn) == CALL_INSN
6991 && GET_CODE (PATTERN (insn)) == COND_EXEC
6992 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
6994 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
6995 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
6996 if (BB_HEAD (bb) == insn)
6997 BB_HEAD (bb) = b;
6998 if (BB_END (bb) == insn)
6999 BB_END (bb) = a;
7002 if (insn == BB_END (bb))
7003 break;
7004 insn = NEXT_INSN (insn);
7009 /* Perform machine dependent operations on the rtl chain INSNS. */
7011 static void
7012 ia64_reorg (void)
7014 /* We are freeing block_for_insn in the toplev to keep compatibility
7015 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7016 compute_bb_for_insn ();
7018 /* If optimizing, we'll have split before scheduling. */
7019 if (optimize == 0)
7020 split_all_insns (0);
7022 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7023 non-optimizing bootstrap. */
7024 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7026 if (ia64_flag_schedule_insns2)
7028 timevar_push (TV_SCHED2);
7029 ia64_final_schedule = 1;
7031 initiate_bundle_states ();
7032 ia64_nop = make_insn_raw (gen_nop ());
7033 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7034 recog_memoized (ia64_nop);
7035 clocks_length = get_max_uid () + 1;
7036 stops_p = xcalloc (1, clocks_length);
7037 if (ia64_tune == PROCESSOR_ITANIUM)
7039 clocks = xcalloc (clocks_length, sizeof (int));
7040 add_cycles = xcalloc (clocks_length, sizeof (int));
7042 if (ia64_tune == PROCESSOR_ITANIUM2)
7044 pos_1 = get_cpu_unit_code ("2_1");
7045 pos_2 = get_cpu_unit_code ("2_2");
7046 pos_3 = get_cpu_unit_code ("2_3");
7047 pos_4 = get_cpu_unit_code ("2_4");
7048 pos_5 = get_cpu_unit_code ("2_5");
7049 pos_6 = get_cpu_unit_code ("2_6");
7050 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7051 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7052 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7053 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7054 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7055 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7056 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7057 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7058 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7059 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7060 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7061 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7062 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7063 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7064 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7065 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7066 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7067 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7068 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7069 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7071 else
7073 pos_1 = get_cpu_unit_code ("1_1");
7074 pos_2 = get_cpu_unit_code ("1_2");
7075 pos_3 = get_cpu_unit_code ("1_3");
7076 pos_4 = get_cpu_unit_code ("1_4");
7077 pos_5 = get_cpu_unit_code ("1_5");
7078 pos_6 = get_cpu_unit_code ("1_6");
7079 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7080 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7081 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7082 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7083 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7084 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7085 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7086 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7087 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7088 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7089 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7090 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7091 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7092 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7093 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7094 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7095 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7096 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7097 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7098 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7100 schedule_ebbs (dump_file);
7101 finish_bundle_states ();
7102 if (ia64_tune == PROCESSOR_ITANIUM)
7104 free (add_cycles);
7105 free (clocks);
7107 free (stops_p);
7108 emit_insn_group_barriers (dump_file);
7110 ia64_final_schedule = 0;
7111 timevar_pop (TV_SCHED2);
7113 else
7114 emit_all_insn_group_barriers (dump_file);
7116 /* A call must not be the last instruction in a function, so that the
7117 return address is still within the function and unwinding works
7118 properly. Note that IA-64 differs from dwarf2 on this point. */
7119 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7121 rtx insn;
7122 int saw_stop = 0;
7124 insn = get_last_insn ();
7125 if (! INSN_P (insn))
7126 insn = prev_active_insn (insn);
7127 /* Skip over insns that expand to nothing. */
7128 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7130 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7131 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7132 saw_stop = 1;
7133 insn = prev_active_insn (insn);
7135 if (GET_CODE (insn) == CALL_INSN)
7137 if (! saw_stop)
7138 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7139 emit_insn (gen_break_f ());
7140 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7144 fixup_errata ();
7145 emit_predicate_relation_info ();
7147 if (ia64_flag_var_tracking)
7149 timevar_push (TV_VAR_TRACKING);
7150 variable_tracking_main ();
7151 timevar_pop (TV_VAR_TRACKING);
7155 /* Return true if REGNO is used by the epilogue. */
7158 ia64_epilogue_uses (int regno)
7160 switch (regno)
7162 case R_GR (1):
7163 /* With a call to a function in another module, we will write a new
7164 value to "gp". After returning from such a call, we need to make
7165 sure the function restores the original gp-value, even if the
7166 function itself does not use the gp anymore. */
7167 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7169 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7170 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7171 /* For functions defined with the syscall_linkage attribute, all
7172 input registers are marked as live at all function exits. This
7173 prevents the register allocator from using the input registers,
7174 which in turn makes it possible to restart a system call after
7175 an interrupt without having to save/restore the input registers.
7176 This also prevents kernel data from leaking to application code. */
7177 return lookup_attribute ("syscall_linkage",
7178 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7180 case R_BR (0):
7181 /* Conditional return patterns can't represent the use of `b0' as
7182 the return address, so we force the value live this way. */
7183 return 1;
7185 case AR_PFS_REGNUM:
7186 /* Likewise for ar.pfs, which is used by br.ret. */
7187 return 1;
7189 default:
7190 return 0;
7194 /* Return true if REGNO is used by the frame unwinder. */
7197 ia64_eh_uses (int regno)
7199 if (! reload_completed)
7200 return 0;
7202 if (current_frame_info.reg_save_b0
7203 && regno == current_frame_info.reg_save_b0)
7204 return 1;
7205 if (current_frame_info.reg_save_pr
7206 && regno == current_frame_info.reg_save_pr)
7207 return 1;
7208 if (current_frame_info.reg_save_ar_pfs
7209 && regno == current_frame_info.reg_save_ar_pfs)
7210 return 1;
7211 if (current_frame_info.reg_save_ar_unat
7212 && regno == current_frame_info.reg_save_ar_unat)
7213 return 1;
7214 if (current_frame_info.reg_save_ar_lc
7215 && regno == current_frame_info.reg_save_ar_lc)
7216 return 1;
7218 return 0;
7221 /* Return true if this goes in small data/bss. */
7223 /* ??? We could also support our own long data here. Generating movl/add/ld8
7224 instead of addl,ld8/ld8. This makes the code bigger, but should make the
7225 code faster because there is one less load. This also includes incomplete
7226 types which can't go in sdata/sbss. */
7228 static bool
7229 ia64_in_small_data_p (tree exp)
7231 if (TARGET_NO_SDATA)
7232 return false;
7234 /* We want to merge strings, so we never consider them small data. */
7235 if (TREE_CODE (exp) == STRING_CST)
7236 return false;
7238 /* Functions are never small data. */
7239 if (TREE_CODE (exp) == FUNCTION_DECL)
7240 return false;
7242 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7244 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7245 if (strcmp (section, ".sdata") == 0
7246 || strcmp (section, ".sbss") == 0)
7247 return true;
7249 else
7251 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7253 /* If this is an incomplete type with size 0, then we can't put it
7254 in sdata because it might be too big when completed. */
7255 if (size > 0 && size <= ia64_section_threshold)
7256 return true;
7259 return false;
7262 /* Output assembly directives for prologue regions. */
7264 /* True if the current basic block is the last block of the function. */
7266 static bool last_block;
7268 /* True if we need a copy_state command at the start of the next block. */
7270 static bool need_copy_state;
7272 /* The function emits unwind directives for the start of an epilogue. */
7274 static void
7275 process_epilogue (void)
7277 /* If this isn't the last block of the function, then we need to label the
7278 current state, and copy it back in at the start of the next block. */
7280 if (!last_block)
7282 fprintf (asm_out_file, "\t.label_state 1\n");
7283 need_copy_state = true;
7286 fprintf (asm_out_file, "\t.restore sp\n");
7289 /* This function processes a SET pattern looking for specific patterns
7290 which result in emitting an assembly directive required for unwinding. */
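/* The cases recognized below are: the initial alloc of ar.pfs, stack
   pointer adjustments (frame allocation and epilogue), copies of rp, pr,
   ar.unat, ar.lc or sp into their save registers, and stores of saved
   registers to the memory stack, each of which maps to one unwind
   directive such as .save, .fframe, .restore sp, .vframe or .save.f.  */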
7292 static int
7293 process_set (FILE *asm_out_file, rtx pat)
7295 rtx src = SET_SRC (pat);
7296 rtx dest = SET_DEST (pat);
7297 int src_regno, dest_regno;
7299 /* Look for the ALLOC insn. */
7300 if (GET_CODE (src) == UNSPEC_VOLATILE
7301 && XINT (src, 1) == UNSPECV_ALLOC
7302 && GET_CODE (dest) == REG)
7304 dest_regno = REGNO (dest);
7306 /* If this is the final destination for ar.pfs, then this must
7307 be the alloc in the prologue. */
7308 if (dest_regno == current_frame_info.reg_save_ar_pfs)
7309 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7310 ia64_dbx_register_number (dest_regno));
7311 else
7313 /* This must be an alloc before a sibcall. We must drop the
7314 old frame info. The easiest way to drop the old frame
7315 info is to ensure we had a ".restore sp" directive
7316 followed by a new prologue. If the procedure doesn't
7317 have a memory-stack frame, we'll issue a dummy ".restore
7318 sp" now. */
7319 if (current_frame_info.total_size == 0)
7320 /* If we haven't done process_epilogue () yet, do it now. */
7321 process_epilogue ();
7322 fprintf (asm_out_file, "\t.prologue\n");
7324 return 1;
7327 /* Look for SP = .... */
7328 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7330 if (GET_CODE (src) == PLUS)
7332 rtx op0 = XEXP (src, 0);
7333 rtx op1 = XEXP (src, 1);
7334 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7336 if (INTVAL (op1) < 0)
7337 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7338 -INTVAL (op1));
7339 else
7340 process_epilogue ();
7342 else
7343 abort ();
7345 else if (GET_CODE (src) == REG
7346 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7347 process_epilogue ();
7348 else
7349 abort ();
7351 return 1;
7354 /* Register move we need to look at. */
7355 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7357 src_regno = REGNO (src);
7358 dest_regno = REGNO (dest);
7360 switch (src_regno)
7362 case BR_REG (0):
7363 /* Saving return address pointer. */
7364 if (dest_regno != current_frame_info.reg_save_b0)
7365 abort ();
7366 fprintf (asm_out_file, "\t.save rp, r%d\n",
7367 ia64_dbx_register_number (dest_regno));
7368 return 1;
7370 case PR_REG (0):
7371 if (dest_regno != current_frame_info.reg_save_pr)
7372 abort ();
7373 fprintf (asm_out_file, "\t.save pr, r%d\n",
7374 ia64_dbx_register_number (dest_regno));
7375 return 1;
7377 case AR_UNAT_REGNUM:
7378 if (dest_regno != current_frame_info.reg_save_ar_unat)
7379 abort ();
7380 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7381 ia64_dbx_register_number (dest_regno));
7382 return 1;
7384 case AR_LC_REGNUM:
7385 if (dest_regno != current_frame_info.reg_save_ar_lc)
7386 abort ();
7387 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7388 ia64_dbx_register_number (dest_regno));
7389 return 1;
7391 case STACK_POINTER_REGNUM:
7392 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7393 || ! frame_pointer_needed)
7394 abort ();
7395 fprintf (asm_out_file, "\t.vframe r%d\n",
7396 ia64_dbx_register_number (dest_regno));
7397 return 1;
7399 default:
7400 /* Everything else should indicate being stored to memory. */
7401 abort ();
7405 /* Memory store we need to look at. */
7406 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7408 long off;
7409 rtx base;
7410 const char *saveop;
7412 if (GET_CODE (XEXP (dest, 0)) == REG)
7414 base = XEXP (dest, 0);
7415 off = 0;
7417 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7418 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7420 base = XEXP (XEXP (dest, 0), 0);
7421 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7423 else
7424 abort ();
7426 if (base == hard_frame_pointer_rtx)
7428 saveop = ".savepsp";
7429 off = - off;
7431 else if (base == stack_pointer_rtx)
7432 saveop = ".savesp";
7433 else
7434 abort ();
7436 src_regno = REGNO (src);
7437 switch (src_regno)
7439 case BR_REG (0):
7440 if (current_frame_info.reg_save_b0 != 0)
7441 abort ();
7442 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7443 return 1;
7445 case PR_REG (0):
7446 if (current_frame_info.reg_save_pr != 0)
7447 abort ();
7448 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7449 return 1;
7451 case AR_LC_REGNUM:
7452 if (current_frame_info.reg_save_ar_lc != 0)
7453 abort ();
7454 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7455 return 1;
7457 case AR_PFS_REGNUM:
7458 if (current_frame_info.reg_save_ar_pfs != 0)
7459 abort ();
7460 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7461 return 1;
7463 case AR_UNAT_REGNUM:
7464 if (current_frame_info.reg_save_ar_unat != 0)
7465 abort ();
7466 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7467 return 1;
7469 case GR_REG (4):
7470 case GR_REG (5):
7471 case GR_REG (6):
7472 case GR_REG (7):
7473 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7474 1 << (src_regno - GR_REG (4)));
7475 return 1;
7477 case BR_REG (1):
7478 case BR_REG (2):
7479 case BR_REG (3):
7480 case BR_REG (4):
7481 case BR_REG (5):
7482 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7483 1 << (src_regno - BR_REG (1)));
7484 return 1;
7486 case FR_REG (2):
7487 case FR_REG (3):
7488 case FR_REG (4):
7489 case FR_REG (5):
7490 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7491 1 << (src_regno - FR_REG (2)));
7492 return 1;
7494 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7495 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7496 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7497 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7498 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7499 1 << (src_regno - FR_REG (12)));
7500 return 1;
7502 default:
7503 return 0;
7507 return 0;
7511 /* This function looks at a single insn and emits any directives
7512 required to unwind this insn. */
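/* Added commentary (not in the original source): the directives that
   process_set above prints for frame-related insns are the ia64 unwind
   annotations seen in the assembly output, e.g. ".prologue",
   ".fframe 16", ".save rp, r33", ".vframe r33", the ".save.g"/".save.b"/
   ".save.f" spill masks, and ".body"/".copy_state 1" around an
   epilogue (register numbers here are only examples).  */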
7513 void
7514 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7516 if (flag_unwind_tables
7517 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7519 rtx pat;
7521 if (GET_CODE (insn) == NOTE
7522 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7524 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7526 /* Restore unwind state from immediately before the epilogue. */
7527 if (need_copy_state)
7529 fprintf (asm_out_file, "\t.body\n");
7530 fprintf (asm_out_file, "\t.copy_state 1\n");
7531 need_copy_state = false;
7535 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7536 return;
7538 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7539 if (pat)
7540 pat = XEXP (pat, 0);
7541 else
7542 pat = PATTERN (insn);
7544 switch (GET_CODE (pat))
7546 case SET:
7547 process_set (asm_out_file, pat);
7548 break;
7550 case PARALLEL:
7552 int par_index;
7553 int limit = XVECLEN (pat, 0);
7554 for (par_index = 0; par_index < limit; par_index++)
7556 rtx x = XVECEXP (pat, 0, par_index);
7557 if (GET_CODE (x) == SET)
7558 process_set (asm_out_file, x);
7560 break;
7563 default:
7564 abort ();
7570 void
7571 ia64_init_builtins (void)
7573 tree psi_type_node = build_pointer_type (integer_type_node);
7574 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7576 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7577 tree si_ftype_psi_si_si
7578 = build_function_type_list (integer_type_node,
7579 psi_type_node, integer_type_node,
7580 integer_type_node, NULL_TREE);
7582 /* __sync_val_compare_and_swap_di */
7583 tree di_ftype_pdi_di_di
7584 = build_function_type_list (long_integer_type_node,
7585 pdi_type_node, long_integer_type_node,
7586 long_integer_type_node, NULL_TREE);
7587 /* __sync_bool_compare_and_swap_di */
7588 tree si_ftype_pdi_di_di
7589 = build_function_type_list (integer_type_node,
7590 pdi_type_node, long_integer_type_node,
7591 long_integer_type_node, NULL_TREE);
7592 /* __sync_synchronize */
7593 tree void_ftype_void
7594 = build_function_type (void_type_node, void_list_node);
7596 /* __sync_lock_test_and_set_si */
7597 tree si_ftype_psi_si
7598 = build_function_type_list (integer_type_node,
7599 psi_type_node, integer_type_node, NULL_TREE);
7601 /* __sync_lock_test_and_set_di */
7602 tree di_ftype_pdi_di
7603 = build_function_type_list (long_integer_type_node,
7604 pdi_type_node, long_integer_type_node,
7605 NULL_TREE);
7607 /* __sync_lock_release_si */
7608 tree void_ftype_psi
7609 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7611 /* __sync_lock_release_di */
7612 tree void_ftype_pdi
7613 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
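/* Added commentary (not in the original source): read as C prototypes,
   the type nodes above correspond to declarations such as

       int  __sync_val_compare_and_swap_si (int *, int, int);
       long __sync_val_compare_and_swap_di (long *, long, long);
       int  __sync_lock_test_and_set_si (int *, int);
       void __sync_lock_release_si (int *);

   matching the def_builtin calls further down.  */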
7615 tree fpreg_type;
7616 tree float80_type;
7618 /* The __fpreg type. */
7619 fpreg_type = make_node (REAL_TYPE);
7620 /* ??? The back end should know to load/save __fpreg variables using
7621 the ldf.fill and stf.spill instructions. */
7622 TYPE_PRECISION (fpreg_type) = 80;
7623 layout_type (fpreg_type);
7624 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
7626 /* The __float80 type. */
7627 float80_type = make_node (REAL_TYPE);
7628 TYPE_PRECISION (float80_type) = 80;
7629 layout_type (float80_type);
7630 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
7632 /* The __float128 type. */
7633 if (!TARGET_HPUX)
7635 tree float128_type = make_node (REAL_TYPE);
7636 TYPE_PRECISION (float128_type) = 128;
7637 layout_type (float128_type);
7638 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
7640 else
7641 /* Under HPUX, this is a synonym for "long double". */
7642 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
7643 "__float128");
7645 #define def_builtin(name, type, code) \
7646 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
7647 NULL, NULL_TREE)
7649 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7650 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7651 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7652 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7653 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7654 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7655 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7656 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7658 def_builtin ("__sync_synchronize", void_ftype_void,
7659 IA64_BUILTIN_SYNCHRONIZE);
7661 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7662 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7663 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7664 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7665 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7666 IA64_BUILTIN_LOCK_RELEASE_SI);
7667 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7668 IA64_BUILTIN_LOCK_RELEASE_DI);
7670 def_builtin ("__builtin_ia64_bsp",
7671 build_function_type (ptr_type_node, void_list_node),
7672 IA64_BUILTIN_BSP);
7674 def_builtin ("__builtin_ia64_flushrs",
7675 build_function_type (void_type_node, void_list_node),
7676 IA64_BUILTIN_FLUSHRS);
7678 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7679 IA64_BUILTIN_FETCH_AND_ADD_SI);
7680 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7681 IA64_BUILTIN_FETCH_AND_SUB_SI);
7682 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7683 IA64_BUILTIN_FETCH_AND_OR_SI);
7684 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7685 IA64_BUILTIN_FETCH_AND_AND_SI);
7686 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7687 IA64_BUILTIN_FETCH_AND_XOR_SI);
7688 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
7689 IA64_BUILTIN_FETCH_AND_NAND_SI);
7691 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
7692 IA64_BUILTIN_ADD_AND_FETCH_SI);
7693 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
7694 IA64_BUILTIN_SUB_AND_FETCH_SI);
7695 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
7696 IA64_BUILTIN_OR_AND_FETCH_SI);
7697 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
7698 IA64_BUILTIN_AND_AND_FETCH_SI);
7699 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
7700 IA64_BUILTIN_XOR_AND_FETCH_SI);
7701 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
7702 IA64_BUILTIN_NAND_AND_FETCH_SI);
7704 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
7705 IA64_BUILTIN_FETCH_AND_ADD_DI);
7706 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
7707 IA64_BUILTIN_FETCH_AND_SUB_DI);
7708 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
7709 IA64_BUILTIN_FETCH_AND_OR_DI);
7710 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
7711 IA64_BUILTIN_FETCH_AND_AND_DI);
7712 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
7713 IA64_BUILTIN_FETCH_AND_XOR_DI);
7714 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
7715 IA64_BUILTIN_FETCH_AND_NAND_DI);
7717 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
7718 IA64_BUILTIN_ADD_AND_FETCH_DI);
7719 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
7720 IA64_BUILTIN_SUB_AND_FETCH_DI);
7721 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
7722 IA64_BUILTIN_OR_AND_FETCH_DI);
7723 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
7724 IA64_BUILTIN_AND_AND_FETCH_DI);
7725 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
7726 IA64_BUILTIN_XOR_AND_FETCH_DI);
7727 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
7728 IA64_BUILTIN_NAND_AND_FETCH_DI);
7730 #undef def_builtin
7733 /* Expand fetch_and_op intrinsics. The basic code sequence is:
7736 tmp = [ptr];
7737 do {
7738 ret = tmp;
7739 ar.ccv = tmp;
7740 tmp <op>= value;
7741 cmpxchgsz.acq tmp = [ptr], tmp
7742 } while (tmp != ret)
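/* Added commentary (not in the original source): a user-level call
   such as

       int prev = __sync_fetch_and_add_si (&counter, 1);

   reaches ia64_expand_fetch_and_op below; when the addend is accepted
   by fetchadd_operand (as a small constant like 1 presumably is), a
   single fetchadd is emitted instead of the ar.ccv/cmpxchg loop
   sketched above.  */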
7745 static rtx
7746 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
7747 tree arglist, rtx target)
7749 rtx ret, label, tmp, ccv, insn, mem, value;
7750 tree arg0, arg1;
7752 arg0 = TREE_VALUE (arglist);
7753 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7754 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7755 #ifdef POINTERS_EXTEND_UNSIGNED
7756 if (GET_MODE(mem) != Pmode)
7757 mem = convert_memory_address (Pmode, mem);
7758 #endif
7759 value = expand_expr (arg1, NULL_RTX, mode, 0);
7761 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7762 MEM_VOLATILE_P (mem) = 1;
7764 if (target && register_operand (target, mode))
7765 ret = target;
7766 else
7767 ret = gen_reg_rtx (mode);
7769 emit_insn (gen_mf ());
7771 /* Special case for fetchadd instructions. */
7772 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
7774 if (mode == SImode)
7775 insn = gen_fetchadd_acq_si (ret, mem, value);
7776 else
7777 insn = gen_fetchadd_acq_di (ret, mem, value);
7778 emit_insn (insn);
7779 return ret;
7782 tmp = gen_reg_rtx (mode);
7783 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7784 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7785 emit_move_insn (tmp, mem);
7787 label = gen_label_rtx ();
7788 emit_label (label);
7789 emit_move_insn (ret, tmp);
7790 convert_move (ccv, tmp, /*unsignedp=*/1);
7792 /* Perform the specific operation. Special case NAND by noticing
7793 one_cmpl_optab instead. */
7794 if (binoptab == one_cmpl_optab)
7796 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7797 binoptab = and_optab;
7799 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
7801 if (mode == SImode)
7802 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
7803 else
7804 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
7805 emit_insn (insn);
7807 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
7809 return ret;
7812 /* Expand op_and_fetch intrinsics. The basic code sequence is:
7815 tmp = [ptr];
7816 do {
7817 old = tmp;
7818 ar.ccv = tmp;
7819 ret = tmp <op> value;
7820 cmpxchgsz.acq tmp = [ptr], ret
7821 } while (tmp != old)
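/* Added commentary (not in the original source): this differs from the
   fetch_and_op loop above only in which value is returned -- the
   result of the operation rather than the old memory contents.  E.g.

       int now = __sync_add_and_fetch_si (&counter, 1);

   yields the incremented value, while __sync_fetch_and_add_si yields
   the value held beforehand.  */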
7824 static rtx
7825 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
7826 tree arglist, rtx target)
7828 rtx old, label, tmp, ret, ccv, insn, mem, value;
7829 tree arg0, arg1;
7831 arg0 = TREE_VALUE (arglist);
7832 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7833 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7834 #ifdef POINTERS_EXTEND_UNSIGNED
7835 if (GET_MODE(mem) != Pmode)
7836 mem = convert_memory_address (Pmode, mem);
7837 #endif
7839 value = expand_expr (arg1, NULL_RTX, mode, 0);
7841 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7842 MEM_VOLATILE_P (mem) = 1;
7844 if (target && ! register_operand (target, mode))
7845 target = NULL_RTX;
7847 emit_insn (gen_mf ());
7848 tmp = gen_reg_rtx (mode);
7849 old = gen_reg_rtx (mode);
7850 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7851 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7853 emit_move_insn (tmp, mem);
7855 label = gen_label_rtx ();
7856 emit_label (label);
7857 emit_move_insn (old, tmp);
7858 convert_move (ccv, tmp, /*unsignedp=*/1);
7860 /* Perform the specific operation. Special case NAND by noticing
7861 one_cmpl_optab instead. */
7862 if (binoptab == one_cmpl_optab)
7864 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7865 binoptab = and_optab;
7867 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
7869 if (mode == SImode)
7870 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
7871 else
7872 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
7873 emit_insn (insn);
7875 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
7877 return ret;
7880 /* Expand val_ and bool_compare_and_swap. For val_ we want:
7882 ar.ccv = oldval
7884 cmpxchgsz.acq ret = [ptr], newval, ar.ccv
7885 return ret
7887 For bool_ it's the same except return ret == oldval.
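/* Added commentary (not in the original source): both builtin forms
   share this expander, e.g.

       long prev = __sync_val_compare_and_swap_di (&word, oldval, newval);
       int  done = __sync_bool_compare_and_swap_di (&word, oldval, newval);

   The val_ form hands back the previous contents of *ptr; the bool_
   form folds that result into the comparison ret == oldval via
   emit_store_flag_force below.  */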
7890 static rtx
7891 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
7892 int boolp, tree arglist, rtx target)
7894 tree arg0, arg1, arg2;
7895 rtx mem, old, new, ccv, tmp, insn;
7897 arg0 = TREE_VALUE (arglist);
7898 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7899 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
7900 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7901 old = expand_expr (arg1, NULL_RTX, mode, 0);
7902 new = expand_expr (arg2, NULL_RTX, mode, 0);
7904 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7905 MEM_VOLATILE_P (mem) = 1;
7907 if (GET_MODE (old) != mode)
7908 old = convert_to_mode (mode, old, /*unsignedp=*/1);
7909 if (GET_MODE (new) != mode)
7910 new = convert_to_mode (mode, new, /*unsignedp=*/1);
7912 if (! register_operand (old, mode))
7913 old = copy_to_mode_reg (mode, old);
7914 if (! register_operand (new, mode))
7915 new = copy_to_mode_reg (mode, new);
7917 if (! boolp && target && register_operand (target, mode))
7918 tmp = target;
7919 else
7920 tmp = gen_reg_rtx (mode);
7922 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7923 convert_move (ccv, old, /*unsignedp=*/1);
7924 emit_insn (gen_mf ());
7925 if (mode == SImode)
7926 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
7927 else
7928 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
7929 emit_insn (insn);
7931 if (boolp)
7933 if (! target)
7934 target = gen_reg_rtx (rmode);
7935 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
7937 else
7938 return tmp;
7941 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
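/* Added commentary (not in the original source): e.g.

       int was = __sync_lock_test_and_set_si (&lock, 1);

   becomes an atomic exchange (gen_xchgsi / gen_xchgdi below) that
   returns the previous contents of the lock word.  */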
7943 static rtx
7944 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
7945 rtx target)
7947 tree arg0, arg1;
7948 rtx mem, new, ret, insn;
7950 arg0 = TREE_VALUE (arglist);
7951 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7952 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7953 new = expand_expr (arg1, NULL_RTX, mode, 0);
7955 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7956 MEM_VOLATILE_P (mem) = 1;
7957 if (! register_operand (new, mode))
7958 new = copy_to_mode_reg (mode, new);
7960 if (target && register_operand (target, mode))
7961 ret = target;
7962 else
7963 ret = gen_reg_rtx (mode);
7965 if (mode == SImode)
7966 insn = gen_xchgsi (ret, mem, new);
7967 else
7968 insn = gen_xchgdi (ret, mem, new);
7969 emit_insn (insn);
7971 return ret;
7974 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
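/* Added commentary (not in the original source): e.g.

       __sync_lock_release_si (&lock);

   expands to a plain store of zero through a MEM marked volatile; the
   `stsz.rel' form quoted in the comment above is presumably what the
   ia64 move patterns emit for such a volatile store.  */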
7976 static rtx
7977 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
7978 rtx target ATTRIBUTE_UNUSED)
7980 tree arg0;
7981 rtx mem;
7983 arg0 = TREE_VALUE (arglist);
7984 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7986 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7987 MEM_VOLATILE_P (mem) = 1;
7989 emit_move_insn (mem, const0_rtx);
7991 return const0_rtx;
7995 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7996 enum machine_mode mode ATTRIBUTE_UNUSED,
7997 int ignore ATTRIBUTE_UNUSED)
7999 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8000 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8001 tree arglist = TREE_OPERAND (exp, 1);
8002 enum machine_mode rmode = VOIDmode;
8004 switch (fcode)
8006 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8007 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8008 mode = SImode;
8009 rmode = SImode;
8010 break;
8012 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8013 case IA64_BUILTIN_LOCK_RELEASE_SI:
8014 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8015 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8016 case IA64_BUILTIN_FETCH_AND_OR_SI:
8017 case IA64_BUILTIN_FETCH_AND_AND_SI:
8018 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8019 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8020 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8021 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8022 case IA64_BUILTIN_OR_AND_FETCH_SI:
8023 case IA64_BUILTIN_AND_AND_FETCH_SI:
8024 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8025 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8026 mode = SImode;
8027 break;
8029 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8030 mode = DImode;
8031 rmode = SImode;
8032 break;
8034 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8035 mode = DImode;
8036 rmode = DImode;
8037 break;
8039 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8040 case IA64_BUILTIN_LOCK_RELEASE_DI:
8041 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8042 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8043 case IA64_BUILTIN_FETCH_AND_OR_DI:
8044 case IA64_BUILTIN_FETCH_AND_AND_DI:
8045 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8046 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8047 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8048 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8049 case IA64_BUILTIN_OR_AND_FETCH_DI:
8050 case IA64_BUILTIN_AND_AND_FETCH_DI:
8051 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8052 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8053 mode = DImode;
8054 break;
8056 default:
8057 break;
8060 switch (fcode)
8062 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8063 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8064 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8065 target);
8067 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8068 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8069 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8070 target);
8072 case IA64_BUILTIN_SYNCHRONIZE:
8073 emit_insn (gen_mf ());
8074 return const0_rtx;
8076 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8077 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8078 return ia64_expand_lock_test_and_set (mode, arglist, target);
8080 case IA64_BUILTIN_LOCK_RELEASE_SI:
8081 case IA64_BUILTIN_LOCK_RELEASE_DI:
8082 return ia64_expand_lock_release (mode, arglist, target);
8084 case IA64_BUILTIN_BSP:
8085 if (! target || ! register_operand (target, DImode))
8086 target = gen_reg_rtx (DImode);
8087 emit_insn (gen_bsp_value (target));
8088 #ifdef POINTERS_EXTEND_UNSIGNED
8089 target = convert_memory_address (ptr_mode, target);
8090 #endif
8091 return target;
8093 case IA64_BUILTIN_FLUSHRS:
8094 emit_insn (gen_flushrs ());
8095 return const0_rtx;
8097 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8098 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8099 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8101 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8102 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8103 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8105 case IA64_BUILTIN_FETCH_AND_OR_SI:
8106 case IA64_BUILTIN_FETCH_AND_OR_DI:
8107 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8109 case IA64_BUILTIN_FETCH_AND_AND_SI:
8110 case IA64_BUILTIN_FETCH_AND_AND_DI:
8111 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8113 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8114 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8115 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8117 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8118 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8119 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8121 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8122 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8123 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8125 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8126 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8127 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8129 case IA64_BUILTIN_OR_AND_FETCH_SI:
8130 case IA64_BUILTIN_OR_AND_FETCH_DI:
8131 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8133 case IA64_BUILTIN_AND_AND_FETCH_SI:
8134 case IA64_BUILTIN_AND_AND_FETCH_DI:
8135 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8137 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8138 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8139 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8141 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8142 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8143 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
8145 default:
8146 break;
8149 return NULL_RTX;
8152 /* On HP-UX IA64, aggregate parameters are passed in the most
8153 significant bits of the stack slot.  */
8155 enum direction
8156 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8158 /* Exception to the normal case for structures, unions, etc.  */
8160 if (type && AGGREGATE_TYPE_P (type)
8161 && int_size_in_bytes (type) < UNITS_PER_WORD)
8162 return upward;
8164 /* Fall back to the default. */
8165 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
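/* Added commentary (not in the original source): for instance, a
   3-byte struct satisfies the test above (an aggregate smaller than
   UNITS_PER_WORD) and is padded upward, i.e. kept in the most
   significant end of its slot as described in the comment above;
   anything else falls back to DEFAULT_FUNCTION_ARG_PADDING.  */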
8168 /* Linked list of all external functions that are to be emitted by GCC.
8169 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8170 order to avoid putting out names that are never really used. */
8172 struct extern_func_list GTY(())
8174 struct extern_func_list *next;
8175 tree decl;
8178 static GTY(()) struct extern_func_list *extern_func_head;
8180 static void
8181 ia64_hpux_add_extern_decl (tree decl)
8183 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8185 p->decl = decl;
8186 p->next = extern_func_head;
8187 extern_func_head = p;
8190 /* Print out the list of used global functions. */
8192 static void
8193 ia64_hpux_file_end (void)
8195 struct extern_func_list *p;
8197 for (p = extern_func_head; p; p = p->next)
8199 tree decl = p->decl;
8200 tree id = DECL_ASSEMBLER_NAME (decl);
8202 if (!id)
8203 abort ();
8205 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8207 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8209 TREE_ASM_WRITTEN (decl) = 1;
8210 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8211 fputs (TYPE_ASM_OP, asm_out_file);
8212 assemble_name (asm_out_file, name);
8213 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8217 extern_func_head = 0;
8220 /* Set SImode div/mod functions, since init_integral_libfuncs only
8221 initializes modes of word_mode and larger.  Rename the TFmode libfuncs
8222 using the HPUX conventions.  __divtf3 is used for XFmode; we need to
8223 keep it for backward compatibility.  */
8225 static void
8226 ia64_init_libfuncs (void)
8228 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8229 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8230 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8231 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8233 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8234 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8235 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8236 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8237 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8239 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8240 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8241 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8242 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8243 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8244 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8246 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8247 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8248 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8249 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8251 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8252 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
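/* Added commentary (not in the original source): the net effect of the
   calls just above is that, for example, a TFmode division is emitted
   as a call to _U_Qfdiv rather than the default __divtf3 name, and
   TFmode conversions to and from SFmode/DFmode/XFmode and the integer
   modes go through the _U_Qfcnvff_* / _U_Qfcnvfxt_* / _U_Qfcnvxf_*
   entry points registered here.  */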
8255 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8257 static void
8258 ia64_hpux_init_libfuncs (void)
8260 ia64_init_libfuncs ();
8262 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8263 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8264 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8266 /* ia64_expand_compare uses this. */
8267 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8269 /* These should never be used. */
8270 set_optab_libfunc (eq_optab, TFmode, 0);
8271 set_optab_libfunc (ne_optab, TFmode, 0);
8272 set_optab_libfunc (gt_optab, TFmode, 0);
8273 set_optab_libfunc (ge_optab, TFmode, 0);
8274 set_optab_libfunc (lt_optab, TFmode, 0);
8275 set_optab_libfunc (le_optab, TFmode, 0);
8278 /* Rename the division and modulus functions in VMS. */
8280 static void
8281 ia64_vms_init_libfuncs (void)
8283 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8284 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8285 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8286 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8287 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8288 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8289 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8290 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8293 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8294 the HPUX conventions. */
8296 static void
8297 ia64_sysv4_init_libfuncs (void)
8299 ia64_init_libfuncs ();
8301 /* These functions are not part of the HPUX TFmode interface. We
8302 use them instead of _U_Qfcmp, which doesn't work the way we
8303 expect. */
8304 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8305 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8306 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8307 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8308 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8309 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8311 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8312 glibc doesn't have them. */
8315 /* Switch to the section to which we should output X. The only thing
8316 special we do here is to honor small data. */
8318 static void
8319 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8320 unsigned HOST_WIDE_INT align)
8322 if (GET_MODE_SIZE (mode) > 0
8323 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8324 sdata_section ();
8325 else
8326 default_elf_select_rtx_section (mode, x, align);
8329 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8330 Pretend flag_pic is always set. */
8332 static void
8333 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8335 default_elf_select_section_1 (exp, reloc, align, true);
8338 static void
8339 ia64_rwreloc_unique_section (tree decl, int reloc)
8341 default_unique_section_1 (decl, reloc, true);
8344 static void
8345 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8346 unsigned HOST_WIDE_INT align)
8348 int save_pic = flag_pic;
8349 flag_pic = 1;
8350 ia64_select_rtx_section (mode, x, align);
8351 flag_pic = save_pic;
8354 static unsigned int
8355 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8357 return default_section_type_flags_1 (decl, name, reloc, true);
8360 /* Return true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
8361 structure type and the address for that return value should be passed
8362 in out0, rather than in r8.  */
8364 static bool
8365 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8367 tree ret_type = TREE_TYPE (fntype);
8369 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8370 as the structure return address parameter, if the return value
8371 type has a non-trivial copy constructor or destructor. It is not
8372 clear if this same convention should be used for other
8373 programming languages. Until G++ 3.4, we incorrectly used r8 for
8374 these return values. */
8375 return (abi_version_at_least (2)
8376 && ret_type
8377 && TYPE_MODE (ret_type) == BLKmode
8378 && TREE_ADDRESSABLE (ret_type)
8379 && strcmp (lang_hooks.name, "GNU C++") == 0);
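/* Added commentary (not in the original source): concretely, a C++
   virtual function returning a class object with a non-trivial copy
   constructor or destructor (hence BLKmode and TREE_ADDRESSABLE) takes
   the return-slot address in out0, pushing `this' into the second
   parameter slot; ia64_output_mi_thunk below consults this predicate
   when choosing this_parmno.  */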
8382 /* Output the assembler code for a thunk function. THUNK_DECL is the
8383 declaration for the thunk function itself, FUNCTION is the decl for
8384 the target function. DELTA is an immediate constant offset to be
8385 added to THIS. If VCALL_OFFSET is nonzero, the word at
8386 *(*this + vcall_offset) should be added to THIS. */
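/* Added commentary (not in the original source): in C-like pseudo code
   the adjustment performed below is

       this += delta;
       if (vcall_offset)
         this += *(*this + vcall_offset);

   with pointer values first widened via ptr_extend when TARGET_ILP32.  */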
8388 static void
8389 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8390 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8391 tree function)
8393 rtx this, insn, funexp;
8394 unsigned int this_parmno;
8395 unsigned int this_regno;
8397 reload_completed = 1;
8398 epilogue_completed = 1;
8399 no_new_pseudos = 1;
8400 reset_block_changes ();
8402 /* Set things up as ia64_expand_prologue might. */
8403 last_scratch_gr_reg = 15;
8405 memset (&current_frame_info, 0, sizeof (current_frame_info));
8406 current_frame_info.spill_cfa_off = -16;
8407 current_frame_info.n_input_regs = 1;
8408 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8410 /* Mark the end of the (empty) prologue. */
8411 emit_note (NOTE_INSN_PROLOGUE_END);
8413 /* Figure out whether "this" will be the first parameter (the
8414 typical case) or the second parameter (as happens when the
8415 virtual function returns certain class objects). */
8416 this_parmno
8417 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
8418 ? 1 : 0);
8419 this_regno = IN_REG (this_parmno);
8420 if (!TARGET_REG_NAMES)
8421 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
8423 this = gen_rtx_REG (Pmode, this_regno);
8424 if (TARGET_ILP32)
8426 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
8427 REG_POINTER (tmp) = 1;
8428 if (delta && CONST_OK_FOR_I (delta))
8430 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8431 delta = 0;
8433 else
8434 emit_insn (gen_ptr_extend (this, tmp));
8437 /* Apply the constant offset, if required. */
8438 if (delta)
8440 rtx delta_rtx = GEN_INT (delta);
8442 if (!CONST_OK_FOR_I (delta))
8444 rtx tmp = gen_rtx_REG (Pmode, 2);
8445 emit_move_insn (tmp, delta_rtx);
8446 delta_rtx = tmp;
8448 emit_insn (gen_adddi3 (this, this, delta_rtx));
8451 /* Apply the offset from the vtable, if required. */
8452 if (vcall_offset)
8454 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8455 rtx tmp = gen_rtx_REG (Pmode, 2);
8457 if (TARGET_ILP32)
8459 rtx t = gen_rtx_REG (ptr_mode, 2);
8460 REG_POINTER (t) = 1;
8461 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8462 if (CONST_OK_FOR_I (vcall_offset))
8464 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8465 vcall_offset_rtx));
8466 vcall_offset = 0;
8468 else
8469 emit_insn (gen_ptr_extend (tmp, t));
8471 else
8472 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8474 if (vcall_offset)
8476 if (!CONST_OK_FOR_J (vcall_offset))
8478 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8479 emit_move_insn (tmp2, vcall_offset_rtx);
8480 vcall_offset_rtx = tmp2;
8482 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8485 if (TARGET_ILP32)
8486 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8487 gen_rtx_MEM (ptr_mode, tmp));
8488 else
8489 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8491 emit_insn (gen_adddi3 (this, this, tmp));
8494 /* Generate a tail call to the target function. */
8495 if (! TREE_USED (function))
8497 assemble_external (function);
8498 TREE_USED (function) = 1;
8500 funexp = XEXP (DECL_RTL (function), 0);
8501 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8502 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8503 insn = get_last_insn ();
8504 SIBLING_CALL_P (insn) = 1;
8506 /* Code generation for calls relies on splitting. */
8507 reload_completed = 1;
8508 epilogue_completed = 1;
8509 try_split (PATTERN (insn), insn, 0);
8511 emit_barrier ();
8513 /* Run just enough of rest_of_compilation to get the insns emitted.
8514 There's not really enough bulk here to make other passes such as
8515 instruction scheduling worthwhile.  Note that use_thunk calls
8516 assemble_start_function and assemble_end_function. */
8518 insn_locators_initialize ();
8519 emit_all_insn_group_barriers (NULL);
8520 insn = get_insns ();
8521 shorten_branches (insn);
8522 final_start_function (insn, file, 1);
8523 final (insn, file, 1, 0);
8524 final_end_function ();
8526 reload_completed = 0;
8527 epilogue_completed = 0;
8528 no_new_pseudos = 0;
8531 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
8533 static rtx
8534 ia64_struct_value_rtx (tree fntype,
8535 int incoming ATTRIBUTE_UNUSED)
8537 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
8538 return NULL_RTX;
8539 return gen_rtx_REG (Pmode, GR_REG (8));
8542 static bool
8543 ia64_scalar_mode_supported_p (enum machine_mode mode)
8545 switch (mode)
8547 case QImode:
8548 case HImode:
8549 case SImode:
8550 case DImode:
8551 case TImode:
8552 return true;
8554 case SFmode:
8555 case DFmode:
8556 case XFmode:
8557 return true;
8559 case TFmode:
8560 return TARGET_HPUX;
8562 default:
8563 return false;
8567 #include "gt-ia64.h"