1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "except.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "toplev.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "intl.h"
57 /* This is used for communication between ASM_OUTPUT_LABEL and
58 ASM_OUTPUT_LABELREF. */
59 int ia64_asm_output_label = 0;
61 /* Define the information needed to generate branch and scc insns. This is
62 stored from the compare operation. */
63 struct rtx_def * ia64_compare_op0;
64 struct rtx_def * ia64_compare_op1;
66 /* Register names for ia64_expand_prologue. */
67 static const char * const ia64_reg_numbers[96] =
68 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
69 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
70 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
71 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
72 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
73 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
74 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
75 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
76 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
77 "r104","r105","r106","r107","r108","r109","r110","r111",
78 "r112","r113","r114","r115","r116","r117","r118","r119",
79 "r120","r121","r122","r123","r124","r125","r126","r127"};
81 /* ??? These strings could be shared with REGISTER_NAMES. */
82 static const char * const ia64_input_reg_names[8] =
83 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_local_reg_names[80] =
87 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
88 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
89 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
90 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
91 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
92 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
93 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
94 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
95 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
96 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
98 /* ??? These strings could be shared with REGISTER_NAMES. */
99 static const char * const ia64_output_reg_names[8] =
100 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
102 /* Which cpu are we scheduling for. */
103 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
105 /* Determines whether we run our final scheduling pass or not. We always
106 avoid the normal second scheduling pass. */
107 static int ia64_flag_schedule_insns2;
109 /* Determines whether we run variable tracking in machine dependent
110 reorganization. */
111 static int ia64_flag_var_tracking;
113 /* Variables which are this size or smaller are put in the sdata/sbss
114 sections. */
116 unsigned int ia64_section_threshold;
118 /* The following variable is used by the DFA insn scheduler. The value is
119 TRUE if we do insn bundling instead of insn scheduling. */
120 int bundling_p = 0;
122 /* Structure to be filled in by ia64_compute_frame_size with register
123 save masks and offsets for the current function. */
125 struct ia64_frame_info
127 HOST_WIDE_INT total_size; /* size of the stack frame, not including
128 the caller's scratch area. */
129 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
130 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
131 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
132 HARD_REG_SET mask; /* mask of saved registers. */
133 unsigned int gr_used_mask; /* mask of registers in use as gr spill
134 registers or long-term scratches. */
135 int n_spilled; /* number of spilled registers. */
136 int reg_fp; /* register for fp. */
137 int reg_save_b0; /* save register for b0. */
138 int reg_save_pr; /* save register for prs. */
139 int reg_save_ar_pfs; /* save register for ar.pfs. */
140 int reg_save_ar_unat; /* save register for ar.unat. */
141 int reg_save_ar_lc; /* save register for ar.lc. */
142 int reg_save_gp; /* save register for gp. */
143 int n_input_regs; /* number of input registers used. */
144 int n_local_regs; /* number of local registers used. */
145 int n_output_regs; /* number of output registers used. */
146 int n_rotate_regs; /* number of rotating registers used. */
148 char need_regstk; /* true if a .regstk directive needed. */
149 char initialized; /* true if the data is finalized. */
152 /* Current frame information calculated by ia64_compute_frame_size. */
153 static struct ia64_frame_info current_frame_info;
155 static int ia64_first_cycle_multipass_dfa_lookahead (void);
156 static void ia64_dependencies_evaluation_hook (rtx, rtx);
157 static void ia64_init_dfa_pre_cycle_insn (void);
158 static rtx ia64_dfa_pre_cycle_insn (void);
159 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
160 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
161 static rtx gen_tls_get_addr (void);
162 static rtx gen_thread_pointer (void);
163 static int find_gr_spill (int);
164 static int next_scratch_gr_reg (void);
165 static void mark_reg_gr_used_mask (rtx, void *);
166 static void ia64_compute_frame_size (HOST_WIDE_INT);
167 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
168 static void finish_spill_pointers (void);
169 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
170 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
171 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
172 static rtx gen_movdi_x (rtx, rtx, rtx);
173 static rtx gen_fr_spill_x (rtx, rtx, rtx);
174 static rtx gen_fr_restore_x (rtx, rtx, rtx);
176 static enum machine_mode hfa_element_mode (tree, bool);
177 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
178 tree, int *, int);
179 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
180 tree, bool);
181 static bool ia64_function_ok_for_sibcall (tree, tree);
182 static bool ia64_return_in_memory (tree, tree);
183 static bool ia64_rtx_costs (rtx, int, int, int *);
184 static void fix_range (const char *);
185 static bool ia64_handle_option (size_t, const char *, int);
186 static struct machine_function * ia64_init_machine_status (void);
187 static void emit_insn_group_barriers (FILE *);
188 static void emit_all_insn_group_barriers (FILE *);
189 static void final_emit_insn_group_barriers (FILE *);
190 static void emit_predicate_relation_info (void);
191 static void ia64_reorg (void);
192 static bool ia64_in_small_data_p (tree);
193 static void process_epilogue (void);
194 static int process_set (FILE *, rtx);
196 static bool ia64_assemble_integer (rtx, unsigned int, int);
197 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
198 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
199 static void ia64_output_function_end_prologue (FILE *);
201 static int ia64_issue_rate (void);
202 static int ia64_adjust_cost (rtx, rtx, rtx, int);
203 static void ia64_sched_init (FILE *, int, int);
204 static void ia64_sched_finish (FILE *, int);
205 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
206 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
207 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
208 static int ia64_variable_issue (FILE *, int, rtx, int);
210 static struct bundle_state *get_free_bundle_state (void);
211 static void free_bundle_state (struct bundle_state *);
212 static void initiate_bundle_states (void);
213 static void finish_bundle_states (void);
214 static unsigned bundle_state_hash (const void *);
215 static int bundle_state_eq_p (const void *, const void *);
216 static int insert_bundle_state (struct bundle_state *);
217 static void initiate_bundle_state_table (void);
218 static void finish_bundle_state_table (void);
219 static int try_issue_nops (struct bundle_state *, int);
220 static int try_issue_insn (struct bundle_state *, rtx);
221 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
222 static int get_max_pos (state_t);
223 static int get_template (state_t, int);
225 static rtx get_next_important_insn (rtx, rtx);
226 static void bundling (FILE *, int, rtx, rtx);
228 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
229 HOST_WIDE_INT, tree);
230 static void ia64_file_start (void);
232 static section *ia64_select_rtx_section (enum machine_mode, rtx,
233 unsigned HOST_WIDE_INT);
234 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
235 ATTRIBUTE_UNUSED;
236 static section *ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
237 ATTRIBUTE_UNUSED;
238 static void ia64_rwreloc_unique_section (tree, int)
239 ATTRIBUTE_UNUSED;
240 static section *ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
241 unsigned HOST_WIDE_INT)
242 ATTRIBUTE_UNUSED;
243 static unsigned int ia64_section_type_flags (tree, const char *, int);
244 static void ia64_hpux_add_extern_decl (tree decl)
245 ATTRIBUTE_UNUSED;
246 static void ia64_hpux_file_end (void)
247 ATTRIBUTE_UNUSED;
248 static void ia64_init_libfuncs (void)
249 ATTRIBUTE_UNUSED;
250 static void ia64_hpux_init_libfuncs (void)
251 ATTRIBUTE_UNUSED;
252 static void ia64_sysv4_init_libfuncs (void)
253 ATTRIBUTE_UNUSED;
254 static void ia64_vms_init_libfuncs (void)
255 ATTRIBUTE_UNUSED;
257 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
258 static void ia64_encode_section_info (tree, rtx, int);
259 static rtx ia64_struct_value_rtx (tree, int);
260 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
261 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
262 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
263 static bool ia64_cannot_force_const_mem (rtx);
264 static const char *ia64_mangle_fundamental_type (tree);
265 static const char *ia64_invalid_conversion (tree, tree);
266 static const char *ia64_invalid_unary_op (int, tree);
267 static const char *ia64_invalid_binary_op (int, tree, tree);
269 /* Table of valid machine attributes. */
270 static const struct attribute_spec ia64_attribute_table[] =
272 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
273 { "syscall_linkage", 0, 0, false, true, true, NULL },
274 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
275 { NULL, 0, 0, false, false, false, NULL }
278 /* Initialize the GCC target structure. */
279 #undef TARGET_ATTRIBUTE_TABLE
280 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
282 #undef TARGET_INIT_BUILTINS
283 #define TARGET_INIT_BUILTINS ia64_init_builtins
285 #undef TARGET_EXPAND_BUILTIN
286 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
288 #undef TARGET_ASM_BYTE_OP
289 #define TARGET_ASM_BYTE_OP "\tdata1\t"
290 #undef TARGET_ASM_ALIGNED_HI_OP
291 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
292 #undef TARGET_ASM_ALIGNED_SI_OP
293 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
294 #undef TARGET_ASM_ALIGNED_DI_OP
295 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
296 #undef TARGET_ASM_UNALIGNED_HI_OP
297 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
298 #undef TARGET_ASM_UNALIGNED_SI_OP
299 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
300 #undef TARGET_ASM_UNALIGNED_DI_OP
301 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
302 #undef TARGET_ASM_INTEGER
303 #define TARGET_ASM_INTEGER ia64_assemble_integer
305 #undef TARGET_ASM_FUNCTION_PROLOGUE
306 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
307 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
308 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
309 #undef TARGET_ASM_FUNCTION_EPILOGUE
310 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
312 #undef TARGET_IN_SMALL_DATA_P
313 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
315 #undef TARGET_SCHED_ADJUST_COST
316 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
317 #undef TARGET_SCHED_ISSUE_RATE
318 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
319 #undef TARGET_SCHED_VARIABLE_ISSUE
320 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
321 #undef TARGET_SCHED_INIT
322 #define TARGET_SCHED_INIT ia64_sched_init
323 #undef TARGET_SCHED_FINISH
324 #define TARGET_SCHED_FINISH ia64_sched_finish
325 #undef TARGET_SCHED_REORDER
326 #define TARGET_SCHED_REORDER ia64_sched_reorder
327 #undef TARGET_SCHED_REORDER2
328 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
330 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
331 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
333 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
334 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
336 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
337 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
338 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
339 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
341 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
342 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
343 ia64_first_cycle_multipass_dfa_lookahead_guard
345 #undef TARGET_SCHED_DFA_NEW_CYCLE
346 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
348 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
349 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
350 #undef TARGET_ARG_PARTIAL_BYTES
351 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
353 #undef TARGET_ASM_OUTPUT_MI_THUNK
354 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
355 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
356 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
358 #undef TARGET_ASM_FILE_START
359 #define TARGET_ASM_FILE_START ia64_file_start
361 #undef TARGET_RTX_COSTS
362 #define TARGET_RTX_COSTS ia64_rtx_costs
363 #undef TARGET_ADDRESS_COST
364 #define TARGET_ADDRESS_COST hook_int_rtx_0
366 #undef TARGET_MACHINE_DEPENDENT_REORG
367 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
369 #undef TARGET_ENCODE_SECTION_INFO
370 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
372 #undef TARGET_SECTION_TYPE_FLAGS
373 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
375 #ifdef HAVE_AS_TLS
376 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
377 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
378 #endif
380 /* ??? ABI doesn't allow us to define this. */
381 #if 0
382 #undef TARGET_PROMOTE_FUNCTION_ARGS
383 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
384 #endif
386 /* ??? ABI doesn't allow us to define this. */
387 #if 0
388 #undef TARGET_PROMOTE_FUNCTION_RETURN
389 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
390 #endif
392 /* ??? Investigate. */
393 #if 0
394 #undef TARGET_PROMOTE_PROTOTYPES
395 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
396 #endif
398 #undef TARGET_STRUCT_VALUE_RTX
399 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
400 #undef TARGET_RETURN_IN_MEMORY
401 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
402 #undef TARGET_SETUP_INCOMING_VARARGS
403 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
404 #undef TARGET_STRICT_ARGUMENT_NAMING
405 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
406 #undef TARGET_MUST_PASS_IN_STACK
407 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
409 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
410 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
412 #undef TARGET_UNWIND_EMIT
413 #define TARGET_UNWIND_EMIT process_for_unwind_directive
415 #undef TARGET_SCALAR_MODE_SUPPORTED_P
416 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
417 #undef TARGET_VECTOR_MODE_SUPPORTED_P
418 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
420 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
421 in an order different from the specified program order. */
422 #undef TARGET_RELAXED_ORDERING
423 #define TARGET_RELAXED_ORDERING true
425 #undef TARGET_DEFAULT_TARGET_FLAGS
426 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
427 #undef TARGET_HANDLE_OPTION
428 #define TARGET_HANDLE_OPTION ia64_handle_option
430 #undef TARGET_CANNOT_FORCE_CONST_MEM
431 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
433 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
434 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ia64_mangle_fundamental_type
436 #undef TARGET_INVALID_CONVERSION
437 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
438 #undef TARGET_INVALID_UNARY_OP
439 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
440 #undef TARGET_INVALID_BINARY_OP
441 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
443 struct gcc_target targetm = TARGET_INITIALIZER;
445 typedef enum
447 ADDR_AREA_NORMAL, /* normal address area */
448 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
450 ia64_addr_area;
452 static GTY(()) tree small_ident1;
453 static GTY(()) tree small_ident2;
455 static void
456 init_idents (void)
458 if (small_ident1 == 0)
460 small_ident1 = get_identifier ("small");
461 small_ident2 = get_identifier ("__small__");
465 /* Retrieve the address area that has been chosen for the given decl. */
467 static ia64_addr_area
468 ia64_get_addr_area (tree decl)
470 tree model_attr;
472 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
473 if (model_attr)
475 tree id;
477 init_idents ();
478 id = TREE_VALUE (TREE_VALUE (model_attr));
479 if (id == small_ident1 || id == small_ident2)
480 return ADDR_AREA_SMALL;
482 return ADDR_AREA_NORMAL;
485 static tree
486 ia64_handle_model_attribute (tree *node, tree name, tree args,
487 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
489 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
490 ia64_addr_area area;
491 tree arg, decl = *node;
493 init_idents ();
494 arg = TREE_VALUE (args);
495 if (arg == small_ident1 || arg == small_ident2)
497 addr_area = ADDR_AREA_SMALL;
499 else
501 warning (OPT_Wattributes, "invalid argument of %qs attribute",
502 IDENTIFIER_POINTER (name));
503 *no_add_attrs = true;
506 switch (TREE_CODE (decl))
508 case VAR_DECL:
509 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
510 == FUNCTION_DECL)
511 && !TREE_STATIC (decl))
513 error ("%Jan address area attribute cannot be specified for "
514 "local variables", decl);
515 *no_add_attrs = true;
517 area = ia64_get_addr_area (decl);
518 if (area != ADDR_AREA_NORMAL && addr_area != area)
520 error ("address area of %q+D conflicts with previous "
521 "declaration", decl);
522 *no_add_attrs = true;
524 break;
526 case FUNCTION_DECL:
527 error ("%Jaddress area attribute cannot be specified for functions",
528 decl);
529 *no_add_attrs = true;
530 break;
532 default:
533 warning (OPT_Wattributes, "%qs attribute ignored",
534 IDENTIFIER_POINTER (name));
535 *no_add_attrs = true;
536 break;
539 return NULL_TREE;
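/* For illustration, a typical use of the attribute handled above is a
   file-scope declaration such as

     static int counter __attribute__ ((model ("small")));

   which asks for the variable (the name "counter" here is arbitrary) to be
   placed in the small address area so that its address is reachable with
   "addl".  */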
542 static void
543 ia64_encode_addr_area (tree decl, rtx symbol)
545 int flags;
547 flags = SYMBOL_REF_FLAGS (symbol);
548 switch (ia64_get_addr_area (decl))
550 case ADDR_AREA_NORMAL: break;
551 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
552 default: gcc_unreachable ();
554 SYMBOL_REF_FLAGS (symbol) = flags;
557 static void
558 ia64_encode_section_info (tree decl, rtx rtl, int first)
560 default_encode_section_info (decl, rtl, first);
562 /* Careful not to prod global register variables. */
563 if (TREE_CODE (decl) == VAR_DECL
564 && GET_CODE (DECL_RTL (decl)) == MEM
565 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
566 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
567 ia64_encode_addr_area (decl, XEXP (rtl, 0));
570 /* Implement CONST_OK_FOR_LETTER_P. */
572 bool
573 ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
575 switch (c)
577 case 'I':
578 return CONST_OK_FOR_I (value);
579 case 'J':
580 return CONST_OK_FOR_J (value);
581 case 'K':
582 return CONST_OK_FOR_K (value);
583 case 'L':
584 return CONST_OK_FOR_L (value);
585 case 'M':
586 return CONST_OK_FOR_M (value);
587 case 'N':
588 return CONST_OK_FOR_N (value);
589 case 'O':
590 return CONST_OK_FOR_O (value);
591 case 'P':
592 return CONST_OK_FOR_P (value);
593 default:
594 return false;
598 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
600 bool
601 ia64_const_double_ok_for_letter_p (rtx value, char c)
603 switch (c)
605 case 'G':
606 return CONST_DOUBLE_OK_FOR_G (value);
607 default:
608 return false;
612 /* Implement EXTRA_CONSTRAINT. */
614 bool
615 ia64_extra_constraint (rtx value, char c)
617 switch (c)
619 case 'Q':
620 /* Non-volatile memory for FP_REG loads/stores. */
621 return memory_operand(value, VOIDmode) && !MEM_VOLATILE_P (value);
623 case 'R':
624 /* 1..4 for shladd arguments. */
625 return (GET_CODE (value) == CONST_INT
626 && INTVAL (value) >= 1 && INTVAL (value) <= 4);
628 case 'S':
629 /* Non-post-inc memory for asms and other unsavory creatures. */
630 return (GET_CODE (value) == MEM
631 && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
632 && (reload_in_progress || memory_operand (value, VOIDmode)));
634 case 'T':
635 /* Symbol ref to small-address-area. */
636 return small_addr_symbolic_operand (value, VOIDmode);
638 case 'U':
639 /* Vector zero. */
640 return value == CONST0_RTX (GET_MODE (value));
642 case 'W':
643 /* An integer vector, such that conversion to an integer yields a
644 value appropriate for an integer 'J' constraint. */
645 if (GET_CODE (value) == CONST_VECTOR
646 && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
648 value = simplify_subreg (DImode, value, GET_MODE (value), 0);
649 return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
651 return false;
653 case 'Y':
654 /* A V2SF vector containing elements that satisfy 'G'. */
655 return
656 (GET_CODE (value) == CONST_VECTOR
657 && GET_MODE (value) == V2SFmode
658 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
659 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
661 default:
662 return false;
666 /* Return 1 if the operands of a move are ok. */
668 int
669 ia64_move_ok (rtx dst, rtx src)
671 /* If we're under init_recog_no_volatile, we'll not be able to use
672 memory_operand. So check the code directly and don't worry about
673 the validity of the underlying address, which should have been
674 checked elsewhere anyway. */
675 if (GET_CODE (dst) != MEM)
676 return 1;
677 if (GET_CODE (src) == MEM)
678 return 0;
679 if (register_operand (src, VOIDmode))
680 return 1;
682 /* Otherwise, this must be a constant: either 0, 0.0, or 1.0. */
683 if (INTEGRAL_MODE_P (GET_MODE (dst)))
684 return src == const0_rtx;
685 else
686 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
689 /* Return 1 if the operands are ok for a floating point load pair. */
691 int
692 ia64_load_pair_ok (rtx dst, rtx src)
694 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
695 return 0;
696 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
697 return 0;
698 switch (GET_CODE (XEXP (src, 0)))
700 case REG:
701 case POST_INC:
702 break;
703 case POST_DEC:
704 return 0;
705 case POST_MODIFY:
707 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
709 if (GET_CODE (adjust) != CONST_INT
710 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
711 return 0;
713 break;
714 default:
715 abort ();
717 return 1;
720 int
721 addp4_optimize_ok (rtx op1, rtx op2)
723 return (basereg_operand (op1, GET_MODE(op1)) !=
724 basereg_operand (op2, GET_MODE(op2)));
727 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
728 Return the length of the field, or <= 0 on failure. */
730 int
731 ia64_depz_field_mask (rtx rop, rtx rshift)
733 unsigned HOST_WIDE_INT op = INTVAL (rop);
734 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
736 /* Get rid of the zero bits we're shifting in. */
737 op >>= shift;
739 /* We must now have a solid block of 1's at bit 0. */
740 return exact_log2 (op + 1);
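/* For example, with ROP == 0xff0 and RSHIFT == 4, the shift leaves 0xff and
   exact_log2 (0x100) gives a field length of 8.  A non-contiguous mask such
   as 0xf70 shifts to 0xf7; 0xf8 is not a power of two, so exact_log2
   returns -1 and the caller treats it as failure.  */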
743 /* Return the TLS model to use for ADDR. */
745 static enum tls_model
746 tls_symbolic_operand_type (rtx addr)
748 enum tls_model tls_kind = 0;
750 if (GET_CODE (addr) == CONST)
752 if (GET_CODE (XEXP (addr, 0)) == PLUS
753 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
754 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
756 else if (GET_CODE (addr) == SYMBOL_REF)
757 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
759 return tls_kind;
762 /* Return true if X is a constant that is valid for some immediate
763 field in an instruction. */
765 bool
766 ia64_legitimate_constant_p (rtx x)
768 switch (GET_CODE (x))
770 case CONST_INT:
771 case LABEL_REF:
772 return true;
774 case CONST_DOUBLE:
775 if (GET_MODE (x) == VOIDmode)
776 return true;
777 return CONST_DOUBLE_OK_FOR_G (x);
779 case CONST:
780 case SYMBOL_REF:
781 return tls_symbolic_operand_type (x) == 0;
783 case CONST_VECTOR:
785 enum machine_mode mode = GET_MODE (x);
787 if (mode == V2SFmode)
788 return ia64_extra_constraint (x, 'Y');
790 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
791 && GET_MODE_SIZE (mode) <= 8);
794 default:
795 return false;
799 /* Don't allow TLS addresses to get spilled to memory. */
801 static bool
802 ia64_cannot_force_const_mem (rtx x)
804 return tls_symbolic_operand_type (x) != 0;
807 /* Expand a symbolic constant load. */
809 bool
810 ia64_expand_load_address (rtx dest, rtx src)
812 gcc_assert (GET_CODE (dest) == REG);
814 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
815 having to pointer-extend the value afterward. Other forms of address
816 computation below are also more natural to compute as 64-bit quantities.
817 If we've been given an SImode destination register, change it. */
818 if (GET_MODE (dest) != Pmode)
819 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest), 0);
821 if (TARGET_NO_PIC)
822 return false;
823 if (small_addr_symbolic_operand (src, VOIDmode))
824 return false;
826 if (TARGET_AUTO_PIC)
827 emit_insn (gen_load_gprel64 (dest, src));
828 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
829 emit_insn (gen_load_fptr (dest, src));
830 else if (sdata_symbolic_operand (src, VOIDmode))
831 emit_insn (gen_load_gprel (dest, src));
832 else
834 HOST_WIDE_INT addend = 0;
835 rtx tmp;
837 /* We did split constant offsets in ia64_expand_move, and we did try
838 to keep them split in move_operand, but we also allowed reload to
839 rematerialize arbitrary constants rather than spill the value to
840 the stack and reload it. So we have to be prepared here to split
841 them apart again. */
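/* The split below sign-extends the low 14 bits of the offset into LO and
   leaves the remainder in HI; e.g. an offset of 0x12345 becomes
   hi == 0x14000 and lo == -0x1cbb, so hi + lo recreates the original value
   and lo fits the signed 14-bit immediate range.  */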
842 if (GET_CODE (src) == CONST)
844 HOST_WIDE_INT hi, lo;
846 hi = INTVAL (XEXP (XEXP (src, 0), 1));
847 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
848 hi = hi - lo;
850 if (lo != 0)
852 addend = lo;
853 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
857 tmp = gen_rtx_HIGH (Pmode, src);
858 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
859 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
861 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
862 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
864 if (addend)
866 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
867 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
871 return true;
874 static GTY(()) rtx gen_tls_tga;
875 static rtx
876 gen_tls_get_addr (void)
878 if (!gen_tls_tga)
879 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
880 return gen_tls_tga;
883 static GTY(()) rtx thread_pointer_rtx;
884 static rtx
885 gen_thread_pointer (void)
887 if (!thread_pointer_rtx)
888 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
889 return thread_pointer_rtx;
892 static rtx
893 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
894 rtx orig_op1, HOST_WIDE_INT addend)
896 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
897 rtx orig_op0 = op0;
898 HOST_WIDE_INT addend_lo, addend_hi;
900 switch (tls_kind)
902 case TLS_MODEL_GLOBAL_DYNAMIC:
903 start_sequence ();
905 tga_op1 = gen_reg_rtx (Pmode);
906 emit_insn (gen_load_dtpmod (tga_op1, op1));
908 tga_op2 = gen_reg_rtx (Pmode);
909 emit_insn (gen_load_dtprel (tga_op2, op1));
911 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
912 LCT_CONST, Pmode, 2, tga_op1,
913 Pmode, tga_op2, Pmode);
915 insns = get_insns ();
916 end_sequence ();
918 if (GET_MODE (op0) != Pmode)
919 op0 = tga_ret;
920 emit_libcall_block (insns, op0, tga_ret, op1);
921 break;
923 case TLS_MODEL_LOCAL_DYNAMIC:
924 /* ??? This isn't the completely proper way to do local-dynamic.
925 If the call to __tls_get_addr is used only by a single symbol,
926 then we should (somehow) move the dtprel to the second arg
927 to avoid the extra add. */
928 start_sequence ();
930 tga_op1 = gen_reg_rtx (Pmode);
931 emit_insn (gen_load_dtpmod (tga_op1, op1));
933 tga_op2 = const0_rtx;
935 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
936 LCT_CONST, Pmode, 2, tga_op1,
937 Pmode, tga_op2, Pmode);
939 insns = get_insns ();
940 end_sequence ();
942 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
943 UNSPEC_LD_BASE);
944 tmp = gen_reg_rtx (Pmode);
945 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
947 if (!register_operand (op0, Pmode))
948 op0 = gen_reg_rtx (Pmode);
949 if (TARGET_TLS64)
951 emit_insn (gen_load_dtprel (op0, op1));
952 emit_insn (gen_adddi3 (op0, tmp, op0));
954 else
955 emit_insn (gen_add_dtprel (op0, op1, tmp));
956 break;
958 case TLS_MODEL_INITIAL_EXEC:
959 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
960 addend_hi = addend - addend_lo;
962 op1 = plus_constant (op1, addend_hi);
963 addend = addend_lo;
965 tmp = gen_reg_rtx (Pmode);
966 emit_insn (gen_load_tprel (tmp, op1));
968 if (!register_operand (op0, Pmode))
969 op0 = gen_reg_rtx (Pmode);
970 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
971 break;
973 case TLS_MODEL_LOCAL_EXEC:
974 if (!register_operand (op0, Pmode))
975 op0 = gen_reg_rtx (Pmode);
977 op1 = orig_op1;
978 addend = 0;
979 if (TARGET_TLS64)
981 emit_insn (gen_load_tprel (op0, op1));
982 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
984 else
985 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
986 break;
988 default:
989 gcc_unreachable ();
992 if (addend)
993 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
994 orig_op0, 1, OPTAB_DIRECT);
995 if (orig_op0 == op0)
996 return NULL_RTX;
997 if (GET_MODE (orig_op0) == Pmode)
998 return op0;
999 return gen_lowpart (GET_MODE (orig_op0), op0);
1002 rtx
1003 ia64_expand_move (rtx op0, rtx op1)
1005 enum machine_mode mode = GET_MODE (op0);
1007 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1008 op1 = force_reg (mode, op1);
1010 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1012 HOST_WIDE_INT addend = 0;
1013 enum tls_model tls_kind;
1014 rtx sym = op1;
1016 if (GET_CODE (op1) == CONST
1017 && GET_CODE (XEXP (op1, 0)) == PLUS
1018 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1020 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1021 sym = XEXP (XEXP (op1, 0), 0);
1024 tls_kind = tls_symbolic_operand_type (sym);
1025 if (tls_kind)
1026 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1028 if (any_offset_symbol_operand (sym, mode))
1029 addend = 0;
1030 else if (aligned_offset_symbol_operand (sym, mode))
1032 HOST_WIDE_INT addend_lo, addend_hi;
1034 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1035 addend_hi = addend - addend_lo;
1037 if (addend_lo != 0)
1039 op1 = plus_constant (sym, addend_hi);
1040 addend = addend_lo;
1042 else
1043 addend = 0;
1045 else
1046 op1 = sym;
1048 if (reload_completed)
1050 /* We really should have taken care of this offset earlier. */
1051 gcc_assert (addend == 0);
1052 if (ia64_expand_load_address (op0, op1))
1053 return NULL_RTX;
1056 if (addend)
1058 rtx subtarget = no_new_pseudos ? op0 : gen_reg_rtx (mode);
1060 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1062 op1 = expand_simple_binop (mode, PLUS, subtarget,
1063 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1064 if (op0 == op1)
1065 return NULL_RTX;
1069 return op1;
1072 /* Split a move from OP1 to OP0 conditional on COND. */
1074 void
1075 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1077 rtx insn, first = get_last_insn ();
1079 emit_move_insn (op0, op1);
1081 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1082 if (INSN_P (insn))
1083 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1084 PATTERN (insn));
1087 /* Split a post-reload TImode or TFmode reference into two DImode
1088 components. This is made extra difficult by the fact that we do
1089 not get any scratch registers to work with, because reload cannot
1090 be prevented from giving us a scratch that overlaps the register
1091 pair involved. So instead, when addressing memory, we tweak the
1092 pointer register up and back down with POST_INCs. Or up and not
1093 back down when we can get away with it.
1095 REVERSED is true when the loads must be done in reversed order
1096 (high word first) for correctness. DEAD is true when the pointer
1097 dies with the second insn we generate and therefore the second
1098 address must not carry a postmodify.
1100 May return an insn which is to be emitted after the moves. */
1102 static rtx
1103 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1105 rtx fixup = 0;
1107 switch (GET_CODE (in))
1109 case REG:
1110 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1111 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1112 break;
1114 case CONST_INT:
1115 case CONST_DOUBLE:
1116 /* Cannot occur reversed. */
1117 gcc_assert (!reversed);
1119 if (GET_MODE (in) != TFmode)
1120 split_double (in, &out[0], &out[1]);
1121 else
1122 /* split_double does not understand how to split a TFmode
1123 quantity into a pair of DImode constants. */
1125 REAL_VALUE_TYPE r;
1126 unsigned HOST_WIDE_INT p[2];
1127 long l[4]; /* TFmode is 128 bits */
1129 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1130 real_to_target (l, &r, TFmode);
1132 if (FLOAT_WORDS_BIG_ENDIAN)
1134 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1135 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1137 else
1139 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1140 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1142 out[0] = GEN_INT (p[0]);
1143 out[1] = GEN_INT (p[1]);
1145 break;
1147 case MEM:
1149 rtx base = XEXP (in, 0);
1150 rtx offset;
1152 switch (GET_CODE (base))
1154 case REG:
1155 if (!reversed)
1157 out[0] = adjust_automodify_address
1158 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1159 out[1] = adjust_automodify_address
1160 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1162 else
1164 /* Reversal requires a pre-increment, which can only
1165 be done as a separate insn. */
1166 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1167 out[0] = adjust_automodify_address
1168 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1169 out[1] = adjust_address (in, DImode, 0);
1171 break;
1173 case POST_INC:
1174 gcc_assert (!reversed && !dead);
1176 /* Just do the increment in two steps. */
1177 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1178 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1179 break;
1181 case POST_DEC:
1182 gcc_assert (!reversed && !dead);
1184 /* Add 8, subtract 24. */
1185 base = XEXP (base, 0);
1186 out[0] = adjust_automodify_address
1187 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1188 out[1] = adjust_automodify_address
1189 (in, DImode,
1190 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1192 break;
1194 case POST_MODIFY:
1195 gcc_assert (!reversed && !dead);
1197 /* Extract and adjust the modification. This case is
1198 trickier than the others, because we might have an
1199 index register, or we might have a combined offset that
1200 doesn't fit a signed 9-bit displacement field. We can
1201 assume the incoming expression is already legitimate. */
1202 offset = XEXP (base, 1);
1203 base = XEXP (base, 0);
1205 out[0] = adjust_automodify_address
1206 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1208 if (GET_CODE (XEXP (offset, 1)) == REG)
1210 /* Can't adjust the postmodify to match. Emit the
1211 original, then a separate addition insn. */
1212 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1213 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1215 else
1217 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1218 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1220 /* Again the postmodify cannot be made to match,
1221 but in this case it's more efficient to get rid
1222 of the postmodify entirely and fix up with an
1223 add insn. */
1224 out[1] = adjust_automodify_address (in, DImode, base, 8);
1225 fixup = gen_adddi3
1226 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1228 else
1230 /* Combined offset still fits in the displacement field.
1231 (We cannot overflow it at the high end.) */
1232 out[1] = adjust_automodify_address
1233 (in, DImode, gen_rtx_POST_MODIFY
1234 (Pmode, base, gen_rtx_PLUS
1235 (Pmode, base,
1236 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1240 break;
1242 default:
1243 gcc_unreachable ();
1245 break;
1248 default:
1249 gcc_unreachable ();
1252 return fixup;
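/* As a concrete case of the REG path above: a load from (mem:TI (reg)) with
   !REVERSED and !DEAD splits into (mem:DI (post_inc reg)) at offset 0 and
   (mem:DI (post_dec reg)) at offset 8, bumping the pointer up by 8 for the
   second word and back down again afterward.  */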
1255 /* Split a TImode or TFmode move instruction after reload.
1256 This is used by *movtf_internal and *movti_internal. */
1257 void
1258 ia64_split_tmode_move (rtx operands[])
1260 rtx in[2], out[2], insn;
1261 rtx fixup[2];
1262 bool dead = false;
1263 bool reversed = false;
1265 /* It is possible for reload to decide to overwrite a pointer with
1266 the value it points to. In that case we have to do the loads in
1267 the appropriate order so that the pointer is not destroyed too
1268 early. Also we must not generate a postmodify for that second
1269 load, or rws_access_regno will die. */
1270 if (GET_CODE (operands[1]) == MEM
1271 && reg_overlap_mentioned_p (operands[0], operands[1]))
1273 rtx base = XEXP (operands[1], 0);
1274 while (GET_CODE (base) != REG)
1275 base = XEXP (base, 0);
1277 if (REGNO (base) == REGNO (operands[0]))
1278 reversed = true;
1279 dead = true;
1281 /* Another reason to do the moves in reversed order is if the first
1282 element of the target register pair is also the second element of
1283 the source register pair. */
1284 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1285 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1286 reversed = true;
1288 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1289 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1291 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1292 if (GET_CODE (EXP) == MEM \
1293 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1294 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1295 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1296 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1297 XEXP (XEXP (EXP, 0), 0), \
1298 REG_NOTES (INSN))
1300 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1301 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1302 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1304 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1305 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1306 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1308 if (fixup[0])
1309 emit_insn (fixup[0]);
1310 if (fixup[1])
1311 emit_insn (fixup[1]);
1313 #undef MAYBE_ADD_REG_INC_NOTE
1316 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1317 through memory plus an extra GR scratch register. Except that you can
1318 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1319 SECONDARY_RELOAD_CLASS, but not both.
1321 We got into problems in the first place by allowing a construct like
1322 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1323 This solution attempts to prevent this situation from occurring. When
1324 we see something like the above, we spill the inner register to memory. */
1326 static rtx
1327 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1329 if (GET_CODE (in) == SUBREG
1330 && GET_MODE (SUBREG_REG (in)) == TImode
1331 && GET_CODE (SUBREG_REG (in)) == REG)
1333 rtx memt = assign_stack_temp (TImode, 16, 0);
1334 emit_move_insn (memt, SUBREG_REG (in));
1335 return adjust_address (memt, mode, 0);
1337 else if (force && GET_CODE (in) == REG)
1339 rtx memx = assign_stack_temp (mode, 16, 0);
1340 emit_move_insn (memx, in);
1341 return memx;
1343 else
1344 return in;
1347 /* Expand the movxf or movrf pattern (MODE says which) with the given
1348 OPERANDS, returning true if the pattern should then invoke
1349 DONE. */
1351 bool
1352 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1354 rtx op0 = operands[0];
1356 if (GET_CODE (op0) == SUBREG)
1357 op0 = SUBREG_REG (op0);
1359 /* We must support XFmode loads into general registers for stdarg/vararg,
1360 unprototyped calls, and a rare case where a long double is passed as
1361 an argument after a float HFA fills the FP registers. We split them into
1362 DImode loads for convenience. We also need to support XFmode stores
1363 for the last case. This case does not happen for stdarg/vararg routines,
1364 because we do a block store to memory of unnamed arguments. */
1366 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1368 rtx out[2];
1370 /* We're hoping to transform everything that deals with XFmode
1371 quantities and GR registers early in the compiler. */
1372 gcc_assert (!no_new_pseudos);
1374 /* Struct to register can just use TImode instead. */
1375 if ((GET_CODE (operands[1]) == SUBREG
1376 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1377 || (GET_CODE (operands[1]) == REG
1378 && GR_REGNO_P (REGNO (operands[1]))))
1380 rtx op1 = operands[1];
1382 if (GET_CODE (op1) == SUBREG)
1383 op1 = SUBREG_REG (op1);
1384 else
1385 op1 = gen_rtx_REG (TImode, REGNO (op1));
1387 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1388 return true;
1391 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1393 /* Don't word-swap when reading in the constant. */
1394 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1395 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1396 0, mode));
1397 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1398 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1399 0, mode));
1400 return true;
1403 /* If the quantity is in a register not known to be GR, spill it. */
1404 if (register_operand (operands[1], mode))
1405 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1407 gcc_assert (GET_CODE (operands[1]) == MEM);
1409 /* Don't word-swap when reading in the value. */
1410 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1411 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1413 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1414 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1415 return true;
1418 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1420 /* We're hoping to transform everything that deals with XFmode
1421 quantities and GR registers early in the compiler. */
1422 gcc_assert (!no_new_pseudos);
1424 /* Op0 can't be a GR_REG here, as that case is handled above.
1425 If op0 is a register, then we spill op1, so that we now have a
1426 MEM operand. This requires creating an XFmode subreg of a TImode reg
1427 to force the spill. */
1428 if (register_operand (operands[0], mode))
1430 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1431 op1 = gen_rtx_SUBREG (mode, op1, 0);
1432 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1435 else
1437 rtx in[2];
1439 gcc_assert (GET_CODE (operands[0]) == MEM);
1441 /* Don't word-swap when writing out the value. */
1442 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1443 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1445 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1446 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1447 return true;
1451 if (!reload_in_progress && !reload_completed)
1453 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1455 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1457 rtx memt, memx, in = operands[1];
1458 if (CONSTANT_P (in))
1459 in = validize_mem (force_const_mem (mode, in));
1460 if (GET_CODE (in) == MEM)
1461 memt = adjust_address (in, TImode, 0);
1462 else
1464 memt = assign_stack_temp (TImode, 16, 0);
1465 memx = adjust_address (memt, mode, 0);
1466 emit_move_insn (memx, in);
1468 emit_move_insn (op0, memt);
1469 return true;
1472 if (!ia64_move_ok (operands[0], operands[1]))
1473 operands[1] = force_reg (mode, operands[1]);
1476 return false;
1479 /* Emit comparison instruction if necessary, returning the expression
1480 that holds the compare result in the proper mode. */
1482 static GTY(()) rtx cmptf_libfunc;
1484 rtx
1485 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1487 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1488 rtx cmp;
1490 /* If we have a BImode input, then we already have a compare result, and
1491 do not need to emit another comparison. */
1492 if (GET_MODE (op0) == BImode)
1494 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1495 cmp = op0;
1497 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1498 magic number as its third argument that indicates what to do.
1499 The return value is an integer to be compared against zero. */
1500 else if (GET_MODE (op0) == TFmode)
1502 enum qfcmp_magic {
1503 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1504 QCMP_UNORD = 2,
1505 QCMP_EQ = 4,
1506 QCMP_LT = 8,
1507 QCMP_GT = 16
1508 } magic;
1509 enum rtx_code ncode;
1510 rtx ret, insns;
1512 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1513 switch (code)
1515 /* 1 = equal, 0 = not equal. Equality operators do
1516 not raise FP_INVALID when given an SNaN operand. */
1517 case EQ: magic = QCMP_EQ; ncode = NE; break;
1518 case NE: magic = QCMP_EQ; ncode = EQ; break;
1519 /* isunordered() from C99. */
1520 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1521 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1522 /* Relational operators raise FP_INVALID when given
1523 an SNaN operand. */
1524 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1525 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1526 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1527 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1528 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1529 Expanders for buneq etc. would have to be added to ia64.md
1530 for this to be useful. */
1531 default: gcc_unreachable ();
1534 start_sequence ();
1536 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1537 op0, TFmode, op1, TFmode,
1538 GEN_INT (magic), DImode);
1539 cmp = gen_reg_rtx (BImode);
1540 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1541 gen_rtx_fmt_ee (ncode, BImode,
1542 ret, const0_rtx)));
1544 insns = get_insns ();
1545 end_sequence ();
1547 emit_libcall_block (insns, cmp, cmp,
1548 gen_rtx_fmt_ee (code, BImode, op0, op1));
1549 code = NE;
1551 else
1553 cmp = gen_reg_rtx (BImode);
1554 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1555 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1556 code = NE;
1559 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1562 /* Generate an integral vector comparison. Return true if the condition has
1563 been reversed, and so the sense of the comparison should be inverted. */
1565 static bool
1566 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1567 rtx dest, rtx op0, rtx op1)
1569 bool negate = false;
1570 rtx x;
1572 /* Canonicalize the comparison to EQ, GT, GTU. */
1573 switch (code)
1575 case EQ:
1576 case GT:
1577 case GTU:
1578 break;
1580 case NE:
1581 case LE:
1582 case LEU:
1583 code = reverse_condition (code);
1584 negate = true;
1585 break;
1587 case GE:
1588 case GEU:
1589 code = reverse_condition (code);
1590 negate = true;
1591 /* FALLTHRU */
1593 case LT:
1594 case LTU:
1595 code = swap_condition (code);
1596 x = op0, op0 = op1, op1 = x;
1597 break;
1599 default:
1600 gcc_unreachable ();
1603 /* Unsigned parallel compare is not supported by the hardware. Play some
1604 tricks to turn this into a signed comparison against 0. */
1605 if (code == GTU)
1607 switch (mode)
1609 case V2SImode:
1611 rtx t1, t2, mask;
1613 /* Perform a parallel modulo subtraction. */
1614 t1 = gen_reg_rtx (V2SImode);
1615 emit_insn (gen_subv2si3 (t1, op0, op1));
1617 /* Extract the original sign bit of op0. */
1618 mask = GEN_INT (-0x80000000);
1619 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1620 mask = force_reg (V2SImode, mask);
1621 t2 = gen_reg_rtx (V2SImode);
1622 emit_insn (gen_andv2si3 (t2, op0, mask));
1624 /* XOR it back into the result of the subtraction. This results
1625 in the sign bit set iff we saw unsigned underflow. */
1626 x = gen_reg_rtx (V2SImode);
1627 emit_insn (gen_xorv2si3 (x, t1, t2));
1629 code = GT;
1630 op0 = x;
1631 op1 = CONST0_RTX (mode);
1633 break;
1635 case V8QImode:
1636 case V4HImode:
1637 /* Perform a parallel unsigned saturating subtraction. */
1638 x = gen_reg_rtx (mode);
1639 emit_insn (gen_rtx_SET (VOIDmode, x,
1640 gen_rtx_US_MINUS (mode, op0, op1)));
1642 code = EQ;
1643 op0 = x;
1644 op1 = CONST0_RTX (mode);
1645 negate = !negate;
1646 break;
1648 default:
1649 gcc_unreachable ();
1653 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1654 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1656 return negate;
1659 /* Emit an integral vector conditional move. */
1661 void
1662 ia64_expand_vecint_cmov (rtx operands[])
1664 enum machine_mode mode = GET_MODE (operands[0]);
1665 enum rtx_code code = GET_CODE (operands[3]);
1666 bool negate;
1667 rtx cmp, x, ot, of;
1669 cmp = gen_reg_rtx (mode);
1670 negate = ia64_expand_vecint_compare (code, mode, cmp,
1671 operands[4], operands[5]);
1673 ot = operands[1+negate];
1674 of = operands[2-negate];
1676 if (ot == CONST0_RTX (mode))
1678 if (of == CONST0_RTX (mode))
1680 emit_move_insn (operands[0], ot);
1681 return;
1684 x = gen_rtx_NOT (mode, cmp);
1685 x = gen_rtx_AND (mode, x, of);
1686 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1688 else if (of == CONST0_RTX (mode))
1690 x = gen_rtx_AND (mode, cmp, ot);
1691 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1693 else
1695 rtx t, f;
1697 t = gen_reg_rtx (mode);
1698 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1699 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1701 f = gen_reg_rtx (mode);
1702 x = gen_rtx_NOT (mode, cmp);
1703 x = gen_rtx_AND (mode, x, operands[2-negate]);
1704 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1706 x = gen_rtx_IOR (mode, t, f);
1707 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1711 /* Emit an integral vector min or max operation. Return true if all done. */
1713 bool
1714 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1715 rtx operands[])
1717 rtx xops[6];
1719 /* These four combinations are supported directly. */
1720 if (mode == V8QImode && (code == UMIN || code == UMAX))
1721 return false;
1722 if (mode == V4HImode && (code == SMIN || code == SMAX))
1723 return false;
1725 /* This combination can be implemented with only saturating subtraction. */
1726 if (mode == V4HImode && code == UMAX)
1728 rtx x, tmp = gen_reg_rtx (mode);
1730 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1731 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1733 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1734 return true;
1737 /* Everything else implemented via vector comparisons. */
1738 xops[0] = operands[0];
1739 xops[4] = xops[1] = operands[1];
1740 xops[5] = xops[2] = operands[2];
1742 switch (code)
1744 case UMIN:
1745 code = LTU;
1746 break;
1747 case UMAX:
1748 code = GTU;
1749 break;
1750 case SMIN:
1751 code = LT;
1752 break;
1753 case SMAX:
1754 code = GT;
1755 break;
1756 default:
1757 gcc_unreachable ();
1759 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1761 ia64_expand_vecint_cmov (xops);
1762 return true;
1765 /* Emit an integral vector widening sum operation. */
1767 void
1768 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1770 rtx l, h, x, s;
1771 enum machine_mode wmode, mode;
1772 rtx (*unpack_l) (rtx, rtx, rtx);
1773 rtx (*unpack_h) (rtx, rtx, rtx);
1774 rtx (*plus) (rtx, rtx, rtx);
1776 wmode = GET_MODE (operands[0]);
1777 mode = GET_MODE (operands[1]);
1779 switch (mode)
1781 case V8QImode:
1782 unpack_l = gen_unpack1_l;
1783 unpack_h = gen_unpack1_h;
1784 plus = gen_addv4hi3;
1785 break;
1786 case V4HImode:
1787 unpack_l = gen_unpack2_l;
1788 unpack_h = gen_unpack2_h;
1789 plus = gen_addv2si3;
1790 break;
1791 default:
1792 gcc_unreachable ();
1795 /* Fill in x with the sign extension of each element in op1. */
1796 if (unsignedp)
1797 x = CONST0_RTX (mode);
1798 else
1800 bool neg;
1802 x = gen_reg_rtx (mode);
1804 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1805 CONST0_RTX (mode));
1806 gcc_assert (!neg);
1809 l = gen_reg_rtx (wmode);
1810 h = gen_reg_rtx (wmode);
1811 s = gen_reg_rtx (wmode);
1813 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1814 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1815 emit_insn (plus (s, l, operands[2]));
1816 emit_insn (plus (operands[0], h, s));
1819 /* Emit a signed or unsigned V8QI dot product operation. */
1821 void
1822 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1824 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1826 /* Fill in x1 and x2 with the sign extension of each element. */
1827 if (unsignedp)
1828 x1 = x2 = CONST0_RTX (V8QImode);
1829 else
1831 bool neg;
1833 x1 = gen_reg_rtx (V8QImode);
1834 x2 = gen_reg_rtx (V8QImode);
1836 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1837 CONST0_RTX (V8QImode));
1838 gcc_assert (!neg);
1839 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1840 CONST0_RTX (V8QImode));
1841 gcc_assert (!neg);
1844 l1 = gen_reg_rtx (V4HImode);
1845 l2 = gen_reg_rtx (V4HImode);
1846 h1 = gen_reg_rtx (V4HImode);
1847 h2 = gen_reg_rtx (V4HImode);
1849 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1850 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1851 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1852 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1854 p1 = gen_reg_rtx (V2SImode);
1855 p2 = gen_reg_rtx (V2SImode);
1856 p3 = gen_reg_rtx (V2SImode);
1857 p4 = gen_reg_rtx (V2SImode);
1858 emit_insn (gen_pmpy2_r (p1, l1, l2));
1859 emit_insn (gen_pmpy2_l (p2, l1, l2));
1860 emit_insn (gen_pmpy2_r (p3, h1, h2));
1861 emit_insn (gen_pmpy2_l (p4, h1, h2));
1863 s1 = gen_reg_rtx (V2SImode);
1864 s2 = gen_reg_rtx (V2SImode);
1865 s3 = gen_reg_rtx (V2SImode);
1866 emit_insn (gen_addv2si3 (s1, p1, p2));
1867 emit_insn (gen_addv2si3 (s2, p3, p4));
1868 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1869 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1872 /* Emit the appropriate sequence for a call. */
1874 void
1875 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1876 int sibcall_p)
1878 rtx insn, b0;
1880 addr = XEXP (addr, 0);
1881 addr = convert_memory_address (DImode, addr);
1882 b0 = gen_rtx_REG (DImode, R_BR (0));
1884 /* ??? Should do this for functions known to bind local too. */
1885 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1887 if (sibcall_p)
1888 insn = gen_sibcall_nogp (addr);
1889 else if (! retval)
1890 insn = gen_call_nogp (addr, b0);
1891 else
1892 insn = gen_call_value_nogp (retval, addr, b0);
1893 insn = emit_call_insn (insn);
1895 else
1897 if (sibcall_p)
1898 insn = gen_sibcall_gp (addr);
1899 else if (! retval)
1900 insn = gen_call_gp (addr, b0);
1901 else
1902 insn = gen_call_value_gp (retval, addr, b0);
1903 insn = emit_call_insn (insn);
1905 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1908 if (sibcall_p)
1909 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
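/* Reload the gp (r1) after a call that may have clobbered it: either copy
   it back from the general register it was saved in, or compute the
   address of its stack save slot (relative to the frame or stack pointer)
   and reload it from memory.  */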
1912 void
1913 ia64_reload_gp (void)
1915 rtx tmp;
1917 if (current_frame_info.reg_save_gp)
1918 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1919 else
1921 HOST_WIDE_INT offset;
1923 offset = (current_frame_info.spill_cfa_off
1924 + current_frame_info.spill_size);
1925 if (frame_pointer_needed)
1927 tmp = hard_frame_pointer_rtx;
1928 offset = -offset;
1930 else
1932 tmp = stack_pointer_rtx;
1933 offset = current_frame_info.total_size - offset;
1936 if (CONST_OK_FOR_I (offset))
1937 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1938 tmp, GEN_INT (offset)));
1939 else
1941 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1942 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1943 pic_offset_table_rtx, tmp));
1946 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1949 emit_move_insn (pic_offset_table_rtx, tmp);
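/* Emit the split form of a call.  If ADDR is still a general register at
   this point, the call is really through a function descriptor: load the
   code address into SCRATCH_B (via SCRATCH_R) and the new gp from the
   descriptor, then call through SCRATCH_B with a "nogp" pattern.  Finally,
   unless this is a sibcall or a noreturn call, reload the gp if it may
   have changed (always for a descriptor call, and whenever the gp is not
   constant).  */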
1952 void
1953 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1954 rtx scratch_b, int noreturn_p, int sibcall_p)
1956 rtx insn;
1957 bool is_desc = false;
1959 /* If we find we're calling through a register, then we're actually
1960 calling through a descriptor, so load up the values. */
1961 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1963 rtx tmp;
1964 bool addr_dead_p;
1966 /* ??? We are currently constrained to *not* use peep2, because
1967 we can legitimately change the global lifetime of the GP
1968 (in the form of killing where previously live). This is
1969 because a call through a descriptor doesn't use the previous
1970 value of the GP, while a direct call does, and we do not
1971 commit to either form until the split here.
1973 That said, this means that we lack precise life info for
1974 whether ADDR is dead after this call. This is not terribly
1975 important, since we can fix things up essentially for free
1976 with the POST_DEC below, but it's nice to not use it when we
1977 can immediately tell it's not necessary. */
1978 addr_dead_p = ((noreturn_p || sibcall_p
1979 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1980 REGNO (addr)))
1981 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1983 /* Load the code address into scratch_b. */
1984 tmp = gen_rtx_POST_INC (Pmode, addr);
1985 tmp = gen_rtx_MEM (Pmode, tmp);
1986 emit_move_insn (scratch_r, tmp);
1987 emit_move_insn (scratch_b, scratch_r);
1989 /* Load the GP address. If ADDR is not dead here, then we must
1990 revert the change made above via the POST_INCREMENT. */
1991 if (!addr_dead_p)
1992 tmp = gen_rtx_POST_DEC (Pmode, addr);
1993 else
1994 tmp = addr;
1995 tmp = gen_rtx_MEM (Pmode, tmp);
1996 emit_move_insn (pic_offset_table_rtx, tmp);
1998 is_desc = true;
1999 addr = scratch_b;
2002 if (sibcall_p)
2003 insn = gen_sibcall_nogp (addr);
2004 else if (retval)
2005 insn = gen_call_value_nogp (retval, addr, retaddr);
2006 else
2007 insn = gen_call_nogp (addr, retaddr);
2008 emit_call_insn (insn);
2010 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2011 ia64_reload_gp ();
2014 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2016 This differs from the generic code in that we know about the zero-extending
2017 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2018 also know that ld.acq+cmpxchg.rel equals a full barrier.
2020 The loop we want to generate looks like
2022 cmp_reg = mem;
2023 label:
2024 old_reg = cmp_reg;
2025 new_reg = cmp_reg op val;
2026 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2027 if (cmp_reg != old_reg)
2028 goto label;
2030 Note that we only do the plain load from memory once. Subsequent
2031 iterations use the value loaded by the compare-and-swap pattern. */
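/* OLD_DST, if non-null, receives the value MEM held before the operation;
   NEW_DST, if non-null, receives the value stored back.  Either may be
   null when the caller does not need that result.  */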
2033 void
2034 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2035 rtx old_dst, rtx new_dst)
2037 enum machine_mode mode = GET_MODE (mem);
2038 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2039 enum insn_code icode;
2041 /* Special case for using fetchadd. */
2042 if ((mode == SImode || mode == DImode)
2043 && (code == PLUS || code == MINUS)
2044 && fetchadd_operand (val, mode))
2046 if (code == MINUS)
2047 val = GEN_INT (-INTVAL (val));
2049 if (!old_dst)
2050 old_dst = gen_reg_rtx (mode);
2052 emit_insn (gen_memory_barrier ());
2054 if (mode == SImode)
2055 icode = CODE_FOR_fetchadd_acq_si;
2056 else
2057 icode = CODE_FOR_fetchadd_acq_di;
2058 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
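/* fetchadd hands back the value MEM held before the addition, so if the
   caller also wants the new value, recompute it as OLD_DST + VAL (VAL has
   already been negated for a subtraction).  */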
2060 if (new_dst)
2062 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2063 true, OPTAB_WIDEN);
2064 if (new_reg != new_dst)
2065 emit_move_insn (new_dst, new_reg);
2067 return;
2070 /* Because of the volatile mem read, we get an ld.acq, which is the
2071 front half of the full barrier. The end half is the cmpxchg.rel. */
2072 gcc_assert (MEM_VOLATILE_P (mem));
2074 old_reg = gen_reg_rtx (DImode);
2075 cmp_reg = gen_reg_rtx (DImode);
2076 label = gen_label_rtx ();
2078 if (mode != DImode)
2080 val = simplify_gen_subreg (DImode, val, mode, 0);
2081 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2083 else
2084 emit_move_insn (cmp_reg, mem);
2086 emit_label (label);
2088 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2089 emit_move_insn (old_reg, cmp_reg);
2090 emit_move_insn (ar_ccv, cmp_reg);
2092 if (old_dst)
2093 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2095 new_reg = cmp_reg;
2096 if (code == NOT)
2098 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2099 code = AND;
2101 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2102 true, OPTAB_DIRECT);
2104 if (mode != DImode)
2105 new_reg = gen_lowpart (mode, new_reg);
2106 if (new_dst)
2107 emit_move_insn (new_dst, new_reg);
2109 switch (mode)
2111 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2112 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2113 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2114 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2115 default:
2116 gcc_unreachable ();
2119 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2121 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2124 /* Begin the assembly file. */
2126 static void
2127 ia64_file_start (void)
2129 /* Variable tracking should be run after all optimizations which change order
2130 of insns. It also needs a valid CFG. This can't be done in
2131 ia64_override_options, because flag_var_tracking is finalized after
2132 that. */
2133 ia64_flag_var_tracking = flag_var_tracking;
2134 flag_var_tracking = 0;
2136 default_file_start ();
2137 emit_safe_across_calls ();
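/* Emit a ".pred.safe_across_calls" directive listing every range of
   predicate registers that is not call-used, i.e. that is preserved
   across calls.  */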
2140 void
2141 emit_safe_across_calls (void)
2143 unsigned int rs, re;
2144 int out_state;
2146 rs = 1;
2147 out_state = 0;
2148 while (1)
2150 while (rs < 64 && call_used_regs[PR_REG (rs)])
2151 rs++;
2152 if (rs >= 64)
2153 break;
2154 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2155 continue;
2156 if (out_state == 0)
2158 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2159 out_state = 1;
2161 else
2162 fputc (',', asm_out_file);
2163 if (re == rs + 1)
2164 fprintf (asm_out_file, "p%u", rs);
2165 else
2166 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2167 rs = re + 1;
2169 if (out_state)
2170 fputc ('\n', asm_out_file);
2173 /* Helper function for ia64_compute_frame_size: find an appropriate general
2174 register to spill some special register to. SPECIAL_SPILL_MASK contains
2175 bits in GR0 to GR31 that have already been allocated by this routine.
2176 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2178 static int
2179 find_gr_spill (int try_locals)
2181 int regno;
2183 /* If this is a leaf function, first try an otherwise unused
2184 call-clobbered register. */
2185 if (current_function_is_leaf)
2187 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2188 if (! regs_ever_live[regno]
2189 && call_used_regs[regno]
2190 && ! fixed_regs[regno]
2191 && ! global_regs[regno]
2192 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2194 current_frame_info.gr_used_mask |= 1 << regno;
2195 return regno;
2199 if (try_locals)
2201 regno = current_frame_info.n_local_regs;
2202 /* If there is a frame pointer, then we can't use loc79, because
2203 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2204 reg_name switching code in ia64_expand_prologue. */
2205 if (regno < (80 - frame_pointer_needed))
2207 current_frame_info.n_local_regs = regno + 1;
2208 return LOC_REG (0) + regno;
2212 /* Failed to find a general register to spill to. Must use stack. */
2213 return 0;
2216 /* In order to make for nice schedules, we try to allocate every temporary
2217 to a different register. We must of course stay away from call-saved,
2218 fixed, and global registers. We must also stay away from registers
2219 allocated in current_frame_info.gr_used_mask, since those include regs
2220 used all through the prologue.
2222 Any register allocated here must be used immediately. The idea is to
2223 aid scheduling, not to solve data flow problems. */
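/* The register handed out by the most recent call to next_scratch_gr_reg;
   the search below resumes just past it, so consecutive requests land on
   different registers.  */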
2225 static int last_scratch_gr_reg;
2227 static int
2228 next_scratch_gr_reg (void)
2230 int i, regno;
2232 for (i = 0; i < 32; ++i)
2234 regno = (last_scratch_gr_reg + i + 1) & 31;
2235 if (call_used_regs[regno]
2236 && ! fixed_regs[regno]
2237 && ! global_regs[regno]
2238 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2240 last_scratch_gr_reg = regno;
2241 return regno;
2245 /* There must be _something_ available. */
2246 gcc_unreachable ();
2249 /* Helper function for ia64_compute_frame_size, called through
2250 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2252 static void
2253 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2255 unsigned int regno = REGNO (reg);
2256 if (regno < 32)
2258 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2259 for (i = 0; i < n; ++i)
2260 current_frame_info.gr_used_mask |= 1 << (regno + i);
2264 /* Compute the frame layout for the current function and record it in
2265 current_frame_info. SIZE is the number of bytes of space needed
2266 for local variables. */
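/* The computation proceeds in several steps: size the local, input and
   output portions of the register stack frame; find which FR, GR and BR
   registers must be spilled; pick general registers (or fall back to
   memory) for the frame pointer, b0, ar.pfs, gp, the predicate block,
   ar.unat and ar.lc; then round the total up to a 16-byte boundary,
   dropping the caller's 16-byte scratch area for leaf functions.  */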
2268 static void
2269 ia64_compute_frame_size (HOST_WIDE_INT size)
2271 HOST_WIDE_INT total_size;
2272 HOST_WIDE_INT spill_size = 0;
2273 HOST_WIDE_INT extra_spill_size = 0;
2274 HOST_WIDE_INT pretend_args_size;
2275 HARD_REG_SET mask;
2276 int n_spilled = 0;
2277 int spilled_gr_p = 0;
2278 int spilled_fr_p = 0;
2279 unsigned int regno;
2280 int i;
2282 if (current_frame_info.initialized)
2283 return;
2285 memset (&current_frame_info, 0, sizeof current_frame_info);
2286 CLEAR_HARD_REG_SET (mask);
2288 /* Don't allocate scratches to the return register. */
2289 diddle_return_value (mark_reg_gr_used_mask, NULL);
2291 /* Don't allocate scratches to the EH scratch registers. */
2292 if (cfun->machine->ia64_eh_epilogue_sp)
2293 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2294 if (cfun->machine->ia64_eh_epilogue_bsp)
2295 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2297 /* Find the size of the register stack frame. We have only 80 local
2298 registers, because we reserve 8 for the inputs and 8 for the
2299 outputs. */
2301 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2302 since we'll be adjusting that down later. */
2303 regno = LOC_REG (78) + ! frame_pointer_needed;
2304 for (; regno >= LOC_REG (0); regno--)
2305 if (regs_ever_live[regno])
2306 break;
2307 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2309 /* For functions marked with the syscall_linkage attribute, we must mark
2310 all eight input registers as in use, so that locals aren't visible to
2311 the caller. */
2313 if (cfun->machine->n_varargs > 0
2314 || lookup_attribute ("syscall_linkage",
2315 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2316 current_frame_info.n_input_regs = 8;
2317 else
2319 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2320 if (regs_ever_live[regno])
2321 break;
2322 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2325 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2326 if (regs_ever_live[regno])
2327 break;
2328 i = regno - OUT_REG (0) + 1;
2330 #ifndef PROFILE_HOOK
2331 /* When -p profiling, we need one output register for the mcount argument.
2332 Likewise for -a profiling for the bb_init_func argument. For -ax
2333 profiling, we need two output registers for the two bb_init_trace_func
2334 arguments. */
2335 if (current_function_profile)
2336 i = MAX (i, 1);
2337 #endif
2338 current_frame_info.n_output_regs = i;
2340 /* ??? No rotating register support yet. */
2341 current_frame_info.n_rotate_regs = 0;
2343 /* Discover which registers need spilling, and how much room that
2344 will take. Begin with floating point and general registers,
2345 which will always wind up on the stack. */
2347 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2348 if (regs_ever_live[regno] && ! call_used_regs[regno])
2350 SET_HARD_REG_BIT (mask, regno);
2351 spill_size += 16;
2352 n_spilled += 1;
2353 spilled_fr_p = 1;
2356 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2357 if (regs_ever_live[regno] && ! call_used_regs[regno])
2359 SET_HARD_REG_BIT (mask, regno);
2360 spill_size += 8;
2361 n_spilled += 1;
2362 spilled_gr_p = 1;
2365 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2366 if (regs_ever_live[regno] && ! call_used_regs[regno])
2368 SET_HARD_REG_BIT (mask, regno);
2369 spill_size += 8;
2370 n_spilled += 1;
2373 /* Now come all special registers that might get saved in other
2374 general registers. */
2376 if (frame_pointer_needed)
2378 current_frame_info.reg_fp = find_gr_spill (1);
2379 /* If we did not get a register, then we take LOC79. This is guaranteed
2380 to be free, even if regs_ever_live is already set, because this is
2381 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2382 as we don't count loc79 above. */
2383 if (current_frame_info.reg_fp == 0)
2385 current_frame_info.reg_fp = LOC_REG (79);
2386 current_frame_info.n_local_regs++;
2390 if (! current_function_is_leaf)
2392 /* Emit a save of BR0 if we call other functions. Do this even
2393 if this function doesn't return, as EH depends on this to be
2394 able to unwind the stack. */
2395 SET_HARD_REG_BIT (mask, BR_REG (0));
2397 current_frame_info.reg_save_b0 = find_gr_spill (1);
2398 if (current_frame_info.reg_save_b0 == 0)
2400 spill_size += 8;
2401 n_spilled += 1;
2404 /* Similarly for ar.pfs. */
2405 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2406 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2407 if (current_frame_info.reg_save_ar_pfs == 0)
2409 extra_spill_size += 8;
2410 n_spilled += 1;
2413 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2414 registers are clobbered, so we fall back to the stack. */
2415 current_frame_info.reg_save_gp
2416 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2417 if (current_frame_info.reg_save_gp == 0)
2419 SET_HARD_REG_BIT (mask, GR_REG (1));
2420 spill_size += 8;
2421 n_spilled += 1;
2424 else
2426 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2428 SET_HARD_REG_BIT (mask, BR_REG (0));
2429 spill_size += 8;
2430 n_spilled += 1;
2433 if (regs_ever_live[AR_PFS_REGNUM])
2435 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2436 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2437 if (current_frame_info.reg_save_ar_pfs == 0)
2439 extra_spill_size += 8;
2440 n_spilled += 1;
2445 /* Unwind descriptor hackery: things are most efficient if we allocate
2446 consecutive GR save registers for RP, PFS, FP in that order. However,
2447 it is absolutely critical that FP get the only hard register that's
2448 guaranteed to be free, so we allocated it first. If all three did
2449 happen to be allocated hard regs, and are consecutive, rearrange them
2450 into the preferred order now. */
2451 if (current_frame_info.reg_fp != 0
2452 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2453 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2455 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2456 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2457 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2460 /* See if we need to store the predicate register block. */
2461 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2462 if (regs_ever_live[regno] && ! call_used_regs[regno])
2463 break;
2464 if (regno <= PR_REG (63))
2466 SET_HARD_REG_BIT (mask, PR_REG (0));
2467 current_frame_info.reg_save_pr = find_gr_spill (1);
2468 if (current_frame_info.reg_save_pr == 0)
2470 extra_spill_size += 8;
2471 n_spilled += 1;
2474 /* ??? Mark them all as used so that register renaming and such
2475 are free to use them. */
2476 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2477 regs_ever_live[regno] = 1;
2480 /* If we're forced to use st8.spill, we're forced to save and restore
2481 ar.unat as well. The check for existing liveness allows inline asm
2482 to touch ar.unat. */
2483 if (spilled_gr_p || cfun->machine->n_varargs
2484 || regs_ever_live[AR_UNAT_REGNUM])
2486 regs_ever_live[AR_UNAT_REGNUM] = 1;
2487 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2488 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2489 if (current_frame_info.reg_save_ar_unat == 0)
2491 extra_spill_size += 8;
2492 n_spilled += 1;
2496 if (regs_ever_live[AR_LC_REGNUM])
2498 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2499 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2500 if (current_frame_info.reg_save_ar_lc == 0)
2502 extra_spill_size += 8;
2503 n_spilled += 1;
2507 /* If we have an odd number of words of pretend arguments written to
2508 the stack, then the FR save area will be unaligned. We round the
2509 size of this area up to keep things 16 byte aligned. */
2510 if (spilled_fr_p)
2511 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2512 else
2513 pretend_args_size = current_function_pretend_args_size;
2515 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2516 + current_function_outgoing_args_size);
2517 total_size = IA64_STACK_ALIGN (total_size);
2519 /* We always use the 16-byte scratch area provided by the caller, but
2520 if we are a leaf function, there's no one to which we need to provide
2521 a scratch area. */
2522 if (current_function_is_leaf)
2523 total_size = MAX (0, total_size - 16);
2525 current_frame_info.total_size = total_size;
2526 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2527 current_frame_info.spill_size = spill_size;
2528 current_frame_info.extra_spill_size = extra_spill_size;
2529 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2530 current_frame_info.n_spilled = n_spilled;
2531 current_frame_info.initialized = reload_completed;
2534 /* Compute the initial difference between the specified pair of registers. */
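/* For example, in a non-leaf function the soft frame pointer lies
   current_function_outgoing_args_size + 16 bytes above the stack pointer
   (the 16 bytes being the caller's scratch area), which is the
   FRAME_POINTER_REGNUM -> STACK_POINTER_REGNUM case below.  */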
2536 HOST_WIDE_INT
2537 ia64_initial_elimination_offset (int from, int to)
2539 HOST_WIDE_INT offset;
2541 ia64_compute_frame_size (get_frame_size ());
2542 switch (from)
2544 case FRAME_POINTER_REGNUM:
2545 switch (to)
2547 case HARD_FRAME_POINTER_REGNUM:
2548 if (current_function_is_leaf)
2549 offset = -current_frame_info.total_size;
2550 else
2551 offset = -(current_frame_info.total_size
2552 - current_function_outgoing_args_size - 16);
2553 break;
2555 case STACK_POINTER_REGNUM:
2556 if (current_function_is_leaf)
2557 offset = 0;
2558 else
2559 offset = 16 + current_function_outgoing_args_size;
2560 break;
2562 default:
2563 gcc_unreachable ();
2565 break;
2567 case ARG_POINTER_REGNUM:
2568 /* Arguments start above the 16 byte save area, unless stdarg
2569 in which case we store through the 16 byte save area. */
2570 switch (to)
2572 case HARD_FRAME_POINTER_REGNUM:
2573 offset = 16 - current_function_pretend_args_size;
2574 break;
2576 case STACK_POINTER_REGNUM:
2577 offset = (current_frame_info.total_size
2578 + 16 - current_function_pretend_args_size);
2579 break;
2581 default:
2582 gcc_unreachable ();
2584 break;
2586 default:
2587 gcc_unreachable ();
2590 return offset;
2593 /* If there are more than a trivial number of register spills, we use
2594 two interleaved iterators so that we can get two memory references
2595 per insn group.
2597 In order to simplify things in the prologue and epilogue expanders,
2598 we use helper functions to fix up the memory references after the
2599 fact with the appropriate offsets to a POST_MODIFY memory mode.
2600 The following data structure tracks the state of the two iterators
2601 while insns are being emitted. */
2603 struct spill_fill_data
2605 rtx init_after; /* point at which to emit initializations */
2606 rtx init_reg[2]; /* initial base register */
2607 rtx iter_reg[2]; /* the iterator registers */
2608 rtx *prev_addr[2]; /* address of last memory use */
2609 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2610 HOST_WIDE_INT prev_off[2]; /* last offset */
2611 int n_iter; /* number of iterators in use */
2612 int next_iter; /* next iterator to use */
2613 unsigned int save_gr_used_mask;
2616 static struct spill_fill_data spill_fill_data;
2618 static void
2619 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2621 int i;
2623 spill_fill_data.init_after = get_last_insn ();
2624 spill_fill_data.init_reg[0] = init_reg;
2625 spill_fill_data.init_reg[1] = init_reg;
2626 spill_fill_data.prev_addr[0] = NULL;
2627 spill_fill_data.prev_addr[1] = NULL;
2628 spill_fill_data.prev_insn[0] = NULL;
2629 spill_fill_data.prev_insn[1] = NULL;
2630 spill_fill_data.prev_off[0] = cfa_off;
2631 spill_fill_data.prev_off[1] = cfa_off;
2632 spill_fill_data.next_iter = 0;
2633 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2635 spill_fill_data.n_iter = 1 + (n_spills > 2);
2636 for (i = 0; i < spill_fill_data.n_iter; ++i)
2638 int regno = next_scratch_gr_reg ();
2639 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2640 current_frame_info.gr_used_mask |= 1 << regno;
2644 static void
2645 finish_spill_pointers (void)
2647 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2650 static rtx
2651 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2653 int iter = spill_fill_data.next_iter;
2654 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2655 rtx disp_rtx = GEN_INT (disp);
2656 rtx mem;
2658 if (spill_fill_data.prev_addr[iter])
2660 if (CONST_OK_FOR_N (disp))
2662 *spill_fill_data.prev_addr[iter]
2663 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2664 gen_rtx_PLUS (DImode,
2665 spill_fill_data.iter_reg[iter],
2666 disp_rtx));
2667 REG_NOTES (spill_fill_data.prev_insn[iter])
2668 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2669 REG_NOTES (spill_fill_data.prev_insn[iter]));
2671 else
2673 /* ??? Could use register post_modify for loads. */
2674 if (! CONST_OK_FOR_I (disp))
2676 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2677 emit_move_insn (tmp, disp_rtx);
2678 disp_rtx = tmp;
2680 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2681 spill_fill_data.iter_reg[iter], disp_rtx));
2684 /* Micro-optimization: if we've created a frame pointer, it's at
2685 CFA 0, which may allow the real iterator to be initialized lower,
2686 slightly increasing parallelism. Also, if there are few saves
2687 it may eliminate the iterator entirely. */
2688 else if (disp == 0
2689 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2690 && frame_pointer_needed)
2692 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2693 set_mem_alias_set (mem, get_varargs_alias_set ());
2694 return mem;
2696 else
2698 rtx seq, insn;
2700 if (disp == 0)
2701 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2702 spill_fill_data.init_reg[iter]);
2703 else
2705 start_sequence ();
2707 if (! CONST_OK_FOR_I (disp))
2709 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2710 emit_move_insn (tmp, disp_rtx);
2711 disp_rtx = tmp;
2714 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2715 spill_fill_data.init_reg[iter],
2716 disp_rtx));
2718 seq = get_insns ();
2719 end_sequence ();
2722 /* Careful for being the first insn in a sequence. */
2723 if (spill_fill_data.init_after)
2724 insn = emit_insn_after (seq, spill_fill_data.init_after);
2725 else
2727 rtx first = get_insns ();
2728 if (first)
2729 insn = emit_insn_before (seq, first);
2730 else
2731 insn = emit_insn (seq);
2733 spill_fill_data.init_after = insn;
2735 /* If DISP is 0, we may or may not have a further adjustment
2736 afterward. If we do, then the load/store insn may be modified
2737 to be a post-modify. If we don't, then this copy may be
2738 eliminated by copyprop_hardreg_forward, which makes this
2739 insn garbage, which runs afoul of the sanity check in
2740 propagate_one_insn. So mark this insn as legal to delete. */
2741 if (disp == 0)
2742 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2743 REG_NOTES (insn));
2746 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2748 /* ??? Not all of the spills are for varargs, but some of them are.
2749 The rest of the spills belong in an alias set of their own. But
2750 it doesn't actually hurt to include them here. */
2751 set_mem_alias_set (mem, get_varargs_alias_set ());
2753 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2754 spill_fill_data.prev_off[iter] = cfa_off;
2756 if (++iter >= spill_fill_data.n_iter)
2757 iter = 0;
2758 spill_fill_data.next_iter = iter;
2760 return mem;
2763 static void
2764 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2765 rtx frame_reg)
2767 int iter = spill_fill_data.next_iter;
2768 rtx mem, insn;
2770 mem = spill_restore_mem (reg, cfa_off);
2771 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2772 spill_fill_data.prev_insn[iter] = insn;
2774 if (frame_reg)
2776 rtx base;
2777 HOST_WIDE_INT off;
2779 RTX_FRAME_RELATED_P (insn) = 1;
2781 /* Don't even pretend that the unwind code can intuit its way
2782 through a pair of interleaved post_modify iterators. Just
2783 provide the correct answer. */
2785 if (frame_pointer_needed)
2787 base = hard_frame_pointer_rtx;
2788 off = - cfa_off;
2790 else
2792 base = stack_pointer_rtx;
2793 off = current_frame_info.total_size - cfa_off;
2796 REG_NOTES (insn)
2797 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2798 gen_rtx_SET (VOIDmode,
2799 gen_rtx_MEM (GET_MODE (reg),
2800 plus_constant (base, off)),
2801 frame_reg),
2802 REG_NOTES (insn));
2806 static void
2807 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2809 int iter = spill_fill_data.next_iter;
2810 rtx insn;
2812 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2813 GEN_INT (cfa_off)));
2814 spill_fill_data.prev_insn[iter] = insn;
2817 /* Wrapper functions that discard the CONST_INT spill offset. These
2818 exist so that we can give gr_spill/gr_fill the offset they need and
2819 use a consistent function interface. */
2821 static rtx
2822 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2824 return gen_movdi (dest, src);
2827 static rtx
2828 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2830 return gen_fr_spill (dest, src);
2833 static rtx
2834 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2836 return gen_fr_restore (dest, src);
2839 /* Called after register allocation to add any instructions needed for the
2840 prologue. Using a prologue insn is favored compared to putting all of the
2841 instructions in output_function_prologue(), since it allows the scheduler
2842 to intermix instructions with the saves of the caller saved registers. In
2843 some cases, it might be necessary to emit a barrier instruction as the last
2844 insn to prevent such scheduling.
2846 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2847 so that the debug info generation code can handle them properly.
2849 The register save area is laid out like so:
2850 cfa+16
2851 [ varargs spill area ]
2852 [ fr register spill area ]
2853 [ br register spill area ]
2854 [ ar register spill area ]
2855 [ pr register spill area ]
2856 [ gr register spill area ] */
2858 /* ??? Get inefficient code when the frame size is larger than can fit in an
2859 adds instruction. */
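/* Outline of the expansion below: emit the alloc (or just a .regstk
   directive when no alloc is needed), copy sp into the frame pointer and
   adjust sp, copy ar.unat into its save register or a scratch, spill the
   varargs registers, then save the predicates, the AR registers and the
   gp, and finally spill the GR, BR and FR registers into the save area
   laid out above.  */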
2861 void
2862 ia64_expand_prologue (void)
2864 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2865 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2866 rtx reg, alt_reg;
2868 ia64_compute_frame_size (get_frame_size ());
2869 last_scratch_gr_reg = 15;
2871 /* If there is no epilogue, then we don't need some prologue insns.
2872 We need to avoid emitting the dead prologue insns, because flow
2873 will complain about them. */
2874 if (optimize)
2876 edge e;
2877 edge_iterator ei;
2879 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2880 if ((e->flags & EDGE_FAKE) == 0
2881 && (e->flags & EDGE_FALLTHRU) != 0)
2882 break;
2883 epilogue_p = (e != NULL);
2885 else
2886 epilogue_p = 1;
2888 /* Set the local, input, and output register names. We need to do this
2889 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2890 half. If we use in/loc/out register names, then we get assembler errors
2891 in crtn.S because there is no alloc insn or regstk directive in there. */
2892 if (! TARGET_REG_NAMES)
2894 int inputs = current_frame_info.n_input_regs;
2895 int locals = current_frame_info.n_local_regs;
2896 int outputs = current_frame_info.n_output_regs;
2898 for (i = 0; i < inputs; i++)
2899 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2900 for (i = 0; i < locals; i++)
2901 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2902 for (i = 0; i < outputs; i++)
2903 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2906 /* Set the frame pointer register name. The regnum is logically loc79,
2907 but of course we'll not have allocated that many locals. Rather than
2908 worrying about renumbering the existing rtxs, we adjust the name. */
2909 /* ??? This code means that we can never use one local register when
2910 there is a frame pointer. loc79 gets wasted in this case, as it is
2911 renamed to a register that will never be used. See also the try_locals
2912 code in find_gr_spill. */
2913 if (current_frame_info.reg_fp)
2915 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2916 reg_names[HARD_FRAME_POINTER_REGNUM]
2917 = reg_names[current_frame_info.reg_fp];
2918 reg_names[current_frame_info.reg_fp] = tmp;
2921 /* We don't need an alloc instruction if we've used no outputs or locals. */
2922 if (current_frame_info.n_local_regs == 0
2923 && current_frame_info.n_output_regs == 0
2924 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2925 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2927 /* If there is no alloc, but there are input registers used, then we
2928 need a .regstk directive. */
2929 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2930 ar_pfs_save_reg = NULL_RTX;
2932 else
2934 current_frame_info.need_regstk = 0;
2936 if (current_frame_info.reg_save_ar_pfs)
2937 regno = current_frame_info.reg_save_ar_pfs;
2938 else
2939 regno = next_scratch_gr_reg ();
2940 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2942 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2943 GEN_INT (current_frame_info.n_input_regs),
2944 GEN_INT (current_frame_info.n_local_regs),
2945 GEN_INT (current_frame_info.n_output_regs),
2946 GEN_INT (current_frame_info.n_rotate_regs)));
2947 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2950 /* Set up frame pointer, stack pointer, and spill iterators. */
2952 n_varargs = cfun->machine->n_varargs;
2953 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2954 stack_pointer_rtx, 0);
2956 if (frame_pointer_needed)
2958 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2959 RTX_FRAME_RELATED_P (insn) = 1;
2962 if (current_frame_info.total_size != 0)
2964 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2965 rtx offset;
2967 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2968 offset = frame_size_rtx;
2969 else
2971 regno = next_scratch_gr_reg ();
2972 offset = gen_rtx_REG (DImode, regno);
2973 emit_move_insn (offset, frame_size_rtx);
2976 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2977 stack_pointer_rtx, offset));
2979 if (! frame_pointer_needed)
2981 RTX_FRAME_RELATED_P (insn) = 1;
2982 if (GET_CODE (offset) != CONST_INT)
2984 REG_NOTES (insn)
2985 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2986 gen_rtx_SET (VOIDmode,
2987 stack_pointer_rtx,
2988 gen_rtx_PLUS (DImode,
2989 stack_pointer_rtx,
2990 frame_size_rtx)),
2991 REG_NOTES (insn));
2995 /* ??? At this point we must generate a magic insn that appears to
2996 modify the stack pointer, the frame pointer, and all spill
2997 iterators. This would allow the most scheduling freedom. For
2998 now, just hard stop. */
2999 emit_insn (gen_blockage ());
3002 /* Must copy out ar.unat before doing any integer spills. */
3003 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3005 if (current_frame_info.reg_save_ar_unat)
3006 ar_unat_save_reg
3007 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3008 else
3010 alt_regno = next_scratch_gr_reg ();
3011 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3012 current_frame_info.gr_used_mask |= 1 << alt_regno;
3015 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3016 insn = emit_move_insn (ar_unat_save_reg, reg);
3017 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
3019 /* Even if we're not going to generate an epilogue, we still
3020 need to save the register so that EH works. */
3021 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
3022 emit_insn (gen_prologue_use (ar_unat_save_reg));
3024 else
3025 ar_unat_save_reg = NULL_RTX;
3027 /* Spill all varargs registers. Do this before spilling any GR registers,
3028 since we want the UNAT bits for the GR registers to override the UNAT
3029 bits from varargs, which we don't care about. */
3031 cfa_off = -16;
3032 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3034 reg = gen_rtx_REG (DImode, regno);
3035 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3038 /* Locate the bottom of the register save area. */
3039 cfa_off = (current_frame_info.spill_cfa_off
3040 + current_frame_info.spill_size
3041 + current_frame_info.extra_spill_size);
3043 /* Save the predicate register block either in a register or in memory. */
3044 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3046 reg = gen_rtx_REG (DImode, PR_REG (0));
3047 if (current_frame_info.reg_save_pr != 0)
3049 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3050 insn = emit_move_insn (alt_reg, reg);
3052 /* ??? Denote pr spill/fill by a DImode move that modifies all
3053 64 hard registers. */
3054 RTX_FRAME_RELATED_P (insn) = 1;
3055 REG_NOTES (insn)
3056 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3057 gen_rtx_SET (VOIDmode, alt_reg, reg),
3058 REG_NOTES (insn));
3060 /* Even if we're not going to generate an epilogue, we still
3061 need to save the register so that EH works. */
3062 if (! epilogue_p)
3063 emit_insn (gen_prologue_use (alt_reg));
3065 else
3067 alt_regno = next_scratch_gr_reg ();
3068 alt_reg = gen_rtx_REG (DImode, alt_regno);
3069 insn = emit_move_insn (alt_reg, reg);
3070 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3071 cfa_off -= 8;
3075 /* Handle AR regs in numerical order. All of them get special handling. */
3076 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3077 && current_frame_info.reg_save_ar_unat == 0)
3079 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3080 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3081 cfa_off -= 8;
3084 /* The alloc insn already copied ar.pfs into a general register. The
3085 only thing we have to do now is copy that register to a stack slot
3086 if we'd not allocated a local register for the job. */
3087 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3088 && current_frame_info.reg_save_ar_pfs == 0)
3090 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3091 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3092 cfa_off -= 8;
3095 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3097 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3098 if (current_frame_info.reg_save_ar_lc != 0)
3100 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3101 insn = emit_move_insn (alt_reg, reg);
3102 RTX_FRAME_RELATED_P (insn) = 1;
3104 /* Even if we're not going to generate an epilogue, we still
3105 need to save the register so that EH works. */
3106 if (! epilogue_p)
3107 emit_insn (gen_prologue_use (alt_reg));
3109 else
3111 alt_regno = next_scratch_gr_reg ();
3112 alt_reg = gen_rtx_REG (DImode, alt_regno);
3113 emit_move_insn (alt_reg, reg);
3114 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3115 cfa_off -= 8;
3119 if (current_frame_info.reg_save_gp)
3121 insn = emit_move_insn (gen_rtx_REG (DImode,
3122 current_frame_info.reg_save_gp),
3123 pic_offset_table_rtx);
3124 /* We don't know for sure yet if this is actually needed, since
3125 we've not split the PIC call patterns. If all of the calls
3126 are indirect, and not followed by any uses of the gp, then
3127 this save is dead. Allow it to go away. */
3128 REG_NOTES (insn)
3129 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
3132 /* We should now be at the base of the gr/br/fr spill area. */
3133 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3134 + current_frame_info.spill_size));
3136 /* Spill all general registers. */
3137 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3138 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3140 reg = gen_rtx_REG (DImode, regno);
3141 do_spill (gen_gr_spill, reg, cfa_off, reg);
3142 cfa_off -= 8;
3145 /* Handle BR0 specially -- it may be getting stored permanently in
3146 some GR register. */
3147 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3149 reg = gen_rtx_REG (DImode, BR_REG (0));
3150 if (current_frame_info.reg_save_b0 != 0)
3152 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3153 insn = emit_move_insn (alt_reg, reg);
3154 RTX_FRAME_RELATED_P (insn) = 1;
3156 /* Even if we're not going to generate an epilogue, we still
3157 need to save the register so that EH works. */
3158 if (! epilogue_p)
3159 emit_insn (gen_prologue_use (alt_reg));
3161 else
3163 alt_regno = next_scratch_gr_reg ();
3164 alt_reg = gen_rtx_REG (DImode, alt_regno);
3165 emit_move_insn (alt_reg, reg);
3166 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3167 cfa_off -= 8;
3171 /* Spill the rest of the BR registers. */
3172 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3173 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3175 alt_regno = next_scratch_gr_reg ();
3176 alt_reg = gen_rtx_REG (DImode, alt_regno);
3177 reg = gen_rtx_REG (DImode, regno);
3178 emit_move_insn (alt_reg, reg);
3179 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3180 cfa_off -= 8;
3183 /* Align the frame and spill all FR registers. */
3184 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3185 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3187 gcc_assert (!(cfa_off & 15));
3188 reg = gen_rtx_REG (XFmode, regno);
3189 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3190 cfa_off -= 16;
3193 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3195 finish_spill_pointers ();
3198 /* Called after register allocation to add any instructions needed for the
3199 epilogue. Using an epilogue insn is favored compared to putting all of the
3200 instructions in output_function_epilogue(), since it allows the scheduler
3201 to intermix instructions with the saves of the caller saved registers. In
3202 some cases, it might be necessary to emit a barrier instruction as the last
3203 insn to prevent such scheduling. */
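/* Outline of the expansion below: restore the predicates; load ar.unat
   (writing it back only after the GRs are restored); restore ar.pfs and
   ar.lc, then the GR, BR and FR registers, then ar.unat itself; finally
   restore the stack pointer and emit either a br.ret or, for a sibcall,
   an alloc that turns the remaining input registers into outputs.  */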
3205 void
3206 ia64_expand_epilogue (int sibcall_p)
3208 rtx insn, reg, alt_reg, ar_unat_save_reg;
3209 int regno, alt_regno, cfa_off;
3211 ia64_compute_frame_size (get_frame_size ());
3213 /* If there is a frame pointer, then we use it instead of the stack
3214 pointer, so that the stack pointer does not need to be valid when
3215 the epilogue starts. See EXIT_IGNORE_STACK. */
3216 if (frame_pointer_needed)
3217 setup_spill_pointers (current_frame_info.n_spilled,
3218 hard_frame_pointer_rtx, 0);
3219 else
3220 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3221 current_frame_info.total_size);
3223 if (current_frame_info.total_size != 0)
3225 /* ??? At this point we must generate a magic insn that appears to
3226 modify the spill iterators and the frame pointer. This would
3227 allow the most scheduling freedom. For now, just hard stop. */
3228 emit_insn (gen_blockage ());
3231 /* Locate the bottom of the register save area. */
3232 cfa_off = (current_frame_info.spill_cfa_off
3233 + current_frame_info.spill_size
3234 + current_frame_info.extra_spill_size);
3236 /* Restore the predicate registers. */
3237 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3239 if (current_frame_info.reg_save_pr != 0)
3240 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3241 else
3243 alt_regno = next_scratch_gr_reg ();
3244 alt_reg = gen_rtx_REG (DImode, alt_regno);
3245 do_restore (gen_movdi_x, alt_reg, cfa_off);
3246 cfa_off -= 8;
3248 reg = gen_rtx_REG (DImode, PR_REG (0));
3249 emit_move_insn (reg, alt_reg);
3252 /* Restore the application registers. */
3254 /* Load the saved unat from the stack, but do not restore it until
3255 after the GRs have been restored. */
3256 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3258 if (current_frame_info.reg_save_ar_unat != 0)
3259 ar_unat_save_reg
3260 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3261 else
3263 alt_regno = next_scratch_gr_reg ();
3264 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3265 current_frame_info.gr_used_mask |= 1 << alt_regno;
3266 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3267 cfa_off -= 8;
3270 else
3271 ar_unat_save_reg = NULL_RTX;
3273 if (current_frame_info.reg_save_ar_pfs != 0)
3275 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3276 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3277 emit_move_insn (reg, alt_reg);
3279 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3281 alt_regno = next_scratch_gr_reg ();
3282 alt_reg = gen_rtx_REG (DImode, alt_regno);
3283 do_restore (gen_movdi_x, alt_reg, cfa_off);
3284 cfa_off -= 8;
3285 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3286 emit_move_insn (reg, alt_reg);
3289 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3291 if (current_frame_info.reg_save_ar_lc != 0)
3292 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3293 else
3295 alt_regno = next_scratch_gr_reg ();
3296 alt_reg = gen_rtx_REG (DImode, alt_regno);
3297 do_restore (gen_movdi_x, alt_reg, cfa_off);
3298 cfa_off -= 8;
3300 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3301 emit_move_insn (reg, alt_reg);
3304 /* We should now be at the base of the gr/br/fr spill area. */
3305 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3306 + current_frame_info.spill_size));
3308 /* The GP may be stored on the stack in the prologue, but it's
3309 never restored in the epilogue. Skip the stack slot. */
3310 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3311 cfa_off -= 8;
3313 /* Restore all general registers. */
3314 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3315 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3317 reg = gen_rtx_REG (DImode, regno);
3318 do_restore (gen_gr_restore, reg, cfa_off);
3319 cfa_off -= 8;
3322 /* Restore the branch registers. Handle B0 specially, as it may
3323 have gotten stored in some GR register. */
3324 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3326 if (current_frame_info.reg_save_b0 != 0)
3327 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3328 else
3330 alt_regno = next_scratch_gr_reg ();
3331 alt_reg = gen_rtx_REG (DImode, alt_regno);
3332 do_restore (gen_movdi_x, alt_reg, cfa_off);
3333 cfa_off -= 8;
3335 reg = gen_rtx_REG (DImode, BR_REG (0));
3336 emit_move_insn (reg, alt_reg);
3339 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3340 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3342 alt_regno = next_scratch_gr_reg ();
3343 alt_reg = gen_rtx_REG (DImode, alt_regno);
3344 do_restore (gen_movdi_x, alt_reg, cfa_off);
3345 cfa_off -= 8;
3346 reg = gen_rtx_REG (DImode, regno);
3347 emit_move_insn (reg, alt_reg);
3350 /* Restore floating point registers. */
3351 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3352 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3354 gcc_assert (!(cfa_off & 15));
3355 reg = gen_rtx_REG (XFmode, regno);
3356 do_restore (gen_fr_restore_x, reg, cfa_off);
3357 cfa_off -= 16;
3360 /* Restore ar.unat for real. */
3361 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3363 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3364 emit_move_insn (reg, ar_unat_save_reg);
3367 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3369 finish_spill_pointers ();
3371 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3373 /* ??? At this point we must generate a magic insn that appears to
3374 modify the spill iterators, the stack pointer, and the frame
3375 pointer. This would allow the most scheduling freedom. For now,
3376 just hard stop. */
3377 emit_insn (gen_blockage ());
3380 if (cfun->machine->ia64_eh_epilogue_sp)
3381 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3382 else if (frame_pointer_needed)
3384 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3385 RTX_FRAME_RELATED_P (insn) = 1;
3387 else if (current_frame_info.total_size)
3389 rtx offset, frame_size_rtx;
3391 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3392 if (CONST_OK_FOR_I (current_frame_info.total_size))
3393 offset = frame_size_rtx;
3394 else
3396 regno = next_scratch_gr_reg ();
3397 offset = gen_rtx_REG (DImode, regno);
3398 emit_move_insn (offset, frame_size_rtx);
3401 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3402 offset));
3404 RTX_FRAME_RELATED_P (insn) = 1;
3405 if (GET_CODE (offset) != CONST_INT)
3407 REG_NOTES (insn)
3408 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3409 gen_rtx_SET (VOIDmode,
3410 stack_pointer_rtx,
3411 gen_rtx_PLUS (DImode,
3412 stack_pointer_rtx,
3413 frame_size_rtx)),
3414 REG_NOTES (insn));
3418 if (cfun->machine->ia64_eh_epilogue_bsp)
3419 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3421 if (! sibcall_p)
3422 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3423 else
3425 int fp = GR_REG (2);
3426 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is
3427 the first available call-clobbered register. If there was a frame
3428 pointer register, we may have swapped the names of r2 and
3429 HARD_FRAME_POINTER_REGNUM, so we have to make sure we're using the
3430 string "r2" when emitting the register name for the assembler. */
3431 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3432 fp = HARD_FRAME_POINTER_REGNUM;
3434 /* We must emit an alloc to force the input registers to become output
3435 registers. Otherwise, if the callee tries to pass its parameters
3436 through to another call without an intervening alloc, then these
3437 values get lost. */
3438 /* ??? We don't need to preserve all input registers. We only need to
3439 preserve those input registers used as arguments to the sibling call.
3440 It is unclear how to compute that number here. */
3441 if (current_frame_info.n_input_regs != 0)
3443 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3444 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3445 const0_rtx, const0_rtx,
3446 n_inputs, const0_rtx));
3447 RTX_FRAME_RELATED_P (insn) = 1;
3452 /* Return 1 if br.ret can do all the work required to return from a
3453 function. */
3455 int
3456 ia64_direct_return (void)
3458 if (reload_completed && ! frame_pointer_needed)
3460 ia64_compute_frame_size (get_frame_size ());
3462 return (current_frame_info.total_size == 0
3463 && current_frame_info.n_spilled == 0
3464 && current_frame_info.reg_save_b0 == 0
3465 && current_frame_info.reg_save_pr == 0
3466 && current_frame_info.reg_save_ar_pfs == 0
3467 && current_frame_info.reg_save_ar_unat == 0
3468 && current_frame_info.reg_save_ar_lc == 0);
3470 return 0;
3473 /* Return the magic cookie that we use to hold the return address
3474 during early compilation. */
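/* The UNSPEC returned below is only a placeholder; once the frame layout
   is known, ia64_split_return_addr_rtx (just below) rewrites it into the
   actual location of the saved return address (b0, its save register, or
   its stack slot).  */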
3476 rtx
3477 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3479 if (count != 0)
3480 return NULL;
3481 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3484 /* Split this value after reload, now that we know where the return
3485 address is saved. */
3487 void
3488 ia64_split_return_addr_rtx (rtx dest)
3490 rtx src;
3492 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3494 if (current_frame_info.reg_save_b0 != 0)
3495 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3496 else
3498 HOST_WIDE_INT off;
3499 unsigned int regno;
3501 /* Compute offset from CFA for BR0. */
3502 /* ??? Must be kept in sync with ia64_expand_prologue. */
3503 off = (current_frame_info.spill_cfa_off
3504 + current_frame_info.spill_size);
3505 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3506 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3507 off -= 8;
3509 /* Convert CFA offset to a register based offset. */
3510 if (frame_pointer_needed)
3511 src = hard_frame_pointer_rtx;
3512 else
3514 src = stack_pointer_rtx;
3515 off += current_frame_info.total_size;
3518 /* Load address into scratch register. */
3519 if (CONST_OK_FOR_I (off))
3520 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3521 else
3523 emit_move_insn (dest, GEN_INT (off));
3524 emit_insn (gen_adddi3 (dest, src, dest));
3527 src = gen_rtx_MEM (Pmode, dest);
3530 else
3531 src = gen_rtx_REG (DImode, BR_REG (0));
3533 emit_move_insn (dest, src);
3536 int
3537 ia64_hard_regno_rename_ok (int from, int to)
3539 /* Don't clobber any of the registers we reserved for the prologue. */
3540 if (to == current_frame_info.reg_fp
3541 || to == current_frame_info.reg_save_b0
3542 || to == current_frame_info.reg_save_pr
3543 || to == current_frame_info.reg_save_ar_pfs
3544 || to == current_frame_info.reg_save_ar_unat
3545 || to == current_frame_info.reg_save_ar_lc)
3546 return 0;
3548 if (from == current_frame_info.reg_fp
3549 || from == current_frame_info.reg_save_b0
3550 || from == current_frame_info.reg_save_pr
3551 || from == current_frame_info.reg_save_ar_pfs
3552 || from == current_frame_info.reg_save_ar_unat
3553 || from == current_frame_info.reg_save_ar_lc)
3554 return 0;
3556 /* Don't use output registers outside the register frame. */
3557 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3558 return 0;
3560 /* Retain even/oddness on predicate register pairs. */
3561 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3562 return (from & 1) == (to & 1);
3564 return 1;
3567 /* Target hook for assembling integer objects. Handle word-sized
3568 aligned objects and detect the cases when @fptr is needed. */
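/* The @fptr() annotation asks the assembler and linker to emit the address
   of an official function descriptor for the symbol rather than its raw
   code address, which is the representation function pointers use on
   IA-64.  */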
3570 static bool
3571 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3573 if (size == POINTER_SIZE / BITS_PER_UNIT
3574 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3575 && GET_CODE (x) == SYMBOL_REF
3576 && SYMBOL_REF_FUNCTION_P (x))
3578 static const char * const directive[2][2] = {
3579 /* 64-bit pointer */ /* 32-bit pointer */
3580 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3581 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3583 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3584 output_addr_const (asm_out_file, x);
3585 fputs (")\n", asm_out_file);
3586 return true;
3588 return default_assemble_integer (x, size, aligned_p);
3591 /* Emit the function prologue. */
3593 static void
3594 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3596 int mask, grsave, grsave_prev;
3598 if (current_frame_info.need_regstk)
3599 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3600 current_frame_info.n_input_regs,
3601 current_frame_info.n_local_regs,
3602 current_frame_info.n_output_regs,
3603 current_frame_info.n_rotate_regs);
3605 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3606 return;
3608 /* Emit the .prologue directive. */
3610 mask = 0;
3611 grsave = grsave_prev = 0;
3612 if (current_frame_info.reg_save_b0 != 0)
3614 mask |= 8;
3615 grsave = grsave_prev = current_frame_info.reg_save_b0;
3617 if (current_frame_info.reg_save_ar_pfs != 0
3618 && (grsave_prev == 0
3619 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3621 mask |= 4;
3622 if (grsave_prev == 0)
3623 grsave = current_frame_info.reg_save_ar_pfs;
3624 grsave_prev = current_frame_info.reg_save_ar_pfs;
3626 if (current_frame_info.reg_fp != 0
3627 && (grsave_prev == 0
3628 || current_frame_info.reg_fp == grsave_prev + 1))
3630 mask |= 2;
3631 if (grsave_prev == 0)
3632 grsave = HARD_FRAME_POINTER_REGNUM;
3633 grsave_prev = current_frame_info.reg_fp;
3635 if (current_frame_info.reg_save_pr != 0
3636 && (grsave_prev == 0
3637 || current_frame_info.reg_save_pr == grsave_prev + 1))
3639 mask |= 1;
3640 if (grsave_prev == 0)
3641 grsave = current_frame_info.reg_save_pr;
3644 if (mask && TARGET_GNU_AS)
3645 fprintf (file, "\t.prologue %d, %d\n", mask,
3646 ia64_dbx_register_number (grsave));
3647 else
3648 fputs ("\t.prologue\n", file);
3650 /* Emit a .spill directive, if necessary, to relocate the base of
3651 the register spill area. */
3652 if (current_frame_info.spill_cfa_off != -16)
3653 fprintf (file, "\t.spill %ld\n",
3654 (long) (current_frame_info.spill_cfa_off
3655 + current_frame_info.spill_size));
3658 /* Emit the .body directive at the scheduled end of the prologue. */
3660 static void
3661 ia64_output_function_end_prologue (FILE *file)
3663 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3664 return;
3666 fputs ("\t.body\n", file);
3669 /* Emit the function epilogue. */
3671 static void
3672 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3673 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3675 int i;
3677 if (current_frame_info.reg_fp)
3679 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3680 reg_names[HARD_FRAME_POINTER_REGNUM]
3681 = reg_names[current_frame_info.reg_fp];
3682 reg_names[current_frame_info.reg_fp] = tmp;
3684 if (! TARGET_REG_NAMES)
3686 for (i = 0; i < current_frame_info.n_input_regs; i++)
3687 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3688 for (i = 0; i < current_frame_info.n_local_regs; i++)
3689 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3690 for (i = 0; i < current_frame_info.n_output_regs; i++)
3691 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3694 current_frame_info.initialized = 0;
3697 int
3698 ia64_dbx_register_number (int regno)
3700 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3701 from its home at loc79 to something inside the register frame. We
3702 must perform the same renumbering here for the debug info. */
3703 if (current_frame_info.reg_fp)
3705 if (regno == HARD_FRAME_POINTER_REGNUM)
3706 regno = current_frame_info.reg_fp;
3707 else if (regno == current_frame_info.reg_fp)
3708 regno = HARD_FRAME_POINTER_REGNUM;
3711 if (IN_REGNO_P (regno))
3712 return 32 + regno - IN_REG (0);
3713 else if (LOC_REGNO_P (regno))
3714 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3715 else if (OUT_REGNO_P (regno))
3716 return (32 + current_frame_info.n_input_regs
3717 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3718 else
3719 return regno;
3722 void
3723 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3725 rtx addr_reg, eight = GEN_INT (8);
3727 /* The Intel assembler requires that the global __ia64_trampoline symbol
3728 be declared explicitly. */
3729 if (!TARGET_GNU_AS)
3731 static bool declared_ia64_trampoline = false;
3733 if (!declared_ia64_trampoline)
3735 declared_ia64_trampoline = true;
3736 (*targetm.asm_out.globalize_label) (asm_out_file,
3737 "__ia64_trampoline");
3741 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3742 addr = convert_memory_address (Pmode, addr);
3743 fnaddr = convert_memory_address (Pmode, fnaddr);
3744 static_chain = convert_memory_address (Pmode, static_chain);
3746 /* Load up our iterator. */
3747 addr_reg = gen_reg_rtx (Pmode);
3748 emit_move_insn (addr_reg, addr);
3750 /* The first two words are the fake descriptor:
3751 __ia64_trampoline, ADDR+16. */
3752 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3753 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3754 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3756 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3757 copy_to_reg (plus_constant (addr, 16)));
3758 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3760 /* The third word is the target descriptor. */
3761 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3762 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3764 /* The fourth word is the static chain. */
3765 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
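/* Sketch of the trampoline laid out by the stores above (illustrative,
   not a dump of generated code); each slot is one 8-byte word at ADDR:

     ADDR +  0:  __ia64_trampoline   \ fake function descriptor
     ADDR +  8:  ADDR + 16           /
     ADDR + 16:  FNADDR              target descriptor word
     ADDR + 24:  STATIC_CHAIN                                      */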
3768 /* Do any needed setup for a variadic function. CUM has not been updated
3769 for the last named argument, which has type TYPE and mode MODE.
3771 We generate the actual spill instructions during prologue generation. */
3773 static void
3774 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3775 tree type, int * pretend_size,
3776 int second_time ATTRIBUTE_UNUSED)
3778 CUMULATIVE_ARGS next_cum = *cum;
3780 /* Skip the current argument. */
3781 ia64_function_arg_advance (&next_cum, mode, type, 1);
3783 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3785 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3786 *pretend_size = n * UNITS_PER_WORD;
3787 cfun->machine->n_varargs = n;
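/* Worked example (illustrative numbers only): for a prototype such as
   "int f (const char *fmt, ...)" the one named argument uses one slot,
   so n = 8 - 1 = 7, *pretend_size becomes 56 bytes, and the prologue
   later spills the seven unnamed input registers.  */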
3791 /* Check whether TYPE is a homogeneous floating point aggregate. If
3792 it is, return the mode of the floating point type that appears
3793 in all leafs. If it is not, return VOIDmode.
3795 An aggregate is a homogeneous floating point aggregate if all
3796 fields/elements in it have the same floating point type (e.g.,
3797 SFmode). 128-bit quad-precision floats are excluded.
3799 Variable sized aggregates should never arrive here, since we should
3800 have already decided to pass them by reference. Top-level zero-sized
3801 aggregates are excluded because our parallels crash the middle-end. */
3803 static enum machine_mode
3804 hfa_element_mode (tree type, bool nested)
3806 enum machine_mode element_mode = VOIDmode;
3807 enum machine_mode mode;
3808 enum tree_code code = TREE_CODE (type);
3809 int know_element_mode = 0;
3810 tree t;
3812 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3813 return VOIDmode;
3815 switch (code)
3817 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3818 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3819 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3820 case LANG_TYPE: case FUNCTION_TYPE:
3821 return VOIDmode;
3823 /* Fortran complex types are supposed to be HFAs, so we need to handle
3824 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3825 types though. */
3826 case COMPLEX_TYPE:
3827 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3828 && TYPE_MODE (type) != TCmode)
3829 return GET_MODE_INNER (TYPE_MODE (type));
3830 else
3831 return VOIDmode;
3833 case REAL_TYPE:
3834 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3835 mode if this is contained within an aggregate. */
3836 if (nested && TYPE_MODE (type) != TFmode)
3837 return TYPE_MODE (type);
3838 else
3839 return VOIDmode;
3841 case ARRAY_TYPE:
3842 return hfa_element_mode (TREE_TYPE (type), 1);
3844 case RECORD_TYPE:
3845 case UNION_TYPE:
3846 case QUAL_UNION_TYPE:
3847 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3849 if (TREE_CODE (t) != FIELD_DECL)
3850 continue;
3852 mode = hfa_element_mode (TREE_TYPE (t), 1);
3853 if (know_element_mode)
3855 if (mode != element_mode)
3856 return VOIDmode;
3858 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3859 return VOIDmode;
3860 else
3862 know_element_mode = 1;
3863 element_mode = mode;
3866 return element_mode;
3868 default:
3869 /* If we reach here, we probably have some front-end specific type
3870 that the backend doesn't know about. This can happen via the
3871 aggregate_value_p call in init_function_start. All we can do is
3872 ignore unknown tree types. */
3873 return VOIDmode;
3876 return VOIDmode;
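/* Illustrative examples of the classification above (hypothetical
   types, shown only for clarity):

     struct rgb  { float r, g, b; };      -> SFmode HFA
     struct cplx { double re, im; };      -> DFmode HFA
     _Complex double                      -> DFmode (complex float OK)
     struct mix  { float f; double d; };  -> VOIDmode (modes differ)
     any field of TFmode quad precision   -> VOIDmode (excluded above)  */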
3879 /* Return the number of words required to hold a quantity of TYPE and MODE
3880 when passed as an argument. */
3881 static int
3882 ia64_function_arg_words (tree type, enum machine_mode mode)
3884 int words;
3886 if (mode == BLKmode)
3887 words = int_size_in_bytes (type);
3888 else
3889 words = GET_MODE_SIZE (mode);
3891 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
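/* Worked example: a 20-byte BLKmode aggregate needs (20 + 7) / 8 = 3
   argument words, while a DImode scalar needs exactly one.  */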
3894 /* Return the number of registers that should be skipped so the current
3895 argument (described by TYPE and WORDS) will be properly aligned.
3897 Integer and float arguments larger than 8 bytes start at the next
3898 even boundary. Aggregates larger than 8 bytes start at the next
3899 even boundary if the aggregate has 16 byte alignment. Note that
3900 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3901 but are still to be aligned in registers.
3903 ??? The ABI does not specify how to handle aggregates with
3904 alignment from 9 to 15 bytes, or greater than 16. We handle them
3905 all as if they had 16 byte alignment. Such aggregates can occur
3906 only if gcc extensions are used. */
3907 static int
3908 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3910 if ((cum->words & 1) == 0)
3911 return 0;
3913 if (type
3914 && TREE_CODE (type) != INTEGER_TYPE
3915 && TREE_CODE (type) != REAL_TYPE)
3916 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3917 else
3918 return words > 1;
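/* Worked example: when an odd number of slots is already in use, a
   16-byte-aligned aggregate or any integer/FP scalar wider than 8
   bytes skips one slot so it starts on an even register; an 8-byte
   scalar does not.  */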
3921 /* Return rtx for register where argument is passed, or zero if it is passed
3922 on the stack. */
3923 /* ??? 128-bit quad-precision floats are always passed in general
3924 registers. */
3927 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3928 int named, int incoming)
3930 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3931 int words = ia64_function_arg_words (type, mode);
3932 int offset = ia64_function_arg_offset (cum, type, words);
3933 enum machine_mode hfa_mode = VOIDmode;
3935 /* If all argument slots are used, then it must go on the stack. */
3936 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3937 return 0;
3939 /* Check for and handle homogeneous FP aggregates. */
3940 if (type)
3941 hfa_mode = hfa_element_mode (type, 0);
3943 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3944 and unprototyped hfas are passed specially. */
3945 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3947 rtx loc[16];
3948 int i = 0;
3949 int fp_regs = cum->fp_regs;
3950 int int_regs = cum->words + offset;
3951 int hfa_size = GET_MODE_SIZE (hfa_mode);
3952 int byte_size;
3953 int args_byte_size;
3955 /* If prototyped, pass it in FR regs then GR regs.
3956 If not prototyped, pass it in both FR and GR regs.
3958 If this is an SFmode aggregate, then it is possible to run out of
3959 FR regs while GR regs are still left. In that case, we pass the
3960 remaining part in the GR regs. */
3962 /* Fill the FP regs. We do this always. We stop if we reach the end
3963 of the argument, the last FP register, or the last argument slot. */
3965 byte_size = ((mode == BLKmode)
3966 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3967 args_byte_size = int_regs * UNITS_PER_WORD;
3968 offset = 0;
3969 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3970 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3972 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3973 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3974 + fp_regs)),
3975 GEN_INT (offset));
3976 offset += hfa_size;
3977 args_byte_size += hfa_size;
3978 fp_regs++;
3981 /* If no prototype, then the whole thing must go in GR regs. */
3982 if (! cum->prototype)
3983 offset = 0;
3984 /* If this is an SFmode aggregate, then we might have some left over
3985 that needs to go in GR regs. */
3986 else if (byte_size != offset)
3987 int_regs += offset / UNITS_PER_WORD;
3989 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3991 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3993 enum machine_mode gr_mode = DImode;
3994 unsigned int gr_size;
3996 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3997 then this goes in a GR reg left adjusted/little endian, right
3998 adjusted/big endian. */
3999 /* ??? Currently this is handled incorrectly, because 4-byte hunks are
4000 always right adjusted/little endian. */
4001 if (offset & 0x4)
4002 gr_mode = SImode;
4003 /* If we have an even 4 byte hunk because the aggregate is a
4004 multiple of 4 bytes in size, then this goes in a GR reg right
4005 adjusted/little endian. */
4006 else if (byte_size - offset == 4)
4007 gr_mode = SImode;
4009 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4010 gen_rtx_REG (gr_mode, (basereg
4011 + int_regs)),
4012 GEN_INT (offset));
4014 gr_size = GET_MODE_SIZE (gr_mode);
4015 offset += gr_size;
4016 if (gr_size == UNITS_PER_WORD
4017 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4018 int_regs++;
4019 else if (gr_size > UNITS_PER_WORD)
4020 int_regs += gr_size / UNITS_PER_WORD;
4022 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4025 /* Integral and aggregates go in general registers. If we have run out of
4026 FR registers, then FP values must also go in general registers. This can
4027 happen when we have a SFmode HFA. */
4028 else if (mode == TFmode || mode == TCmode
4029 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4031 int byte_size = ((mode == BLKmode)
4032 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4033 if (BYTES_BIG_ENDIAN
4034 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4035 && byte_size < UNITS_PER_WORD
4036 && byte_size > 0)
4038 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4039 gen_rtx_REG (DImode,
4040 (basereg + cum->words
4041 + offset)),
4042 const0_rtx);
4043 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4045 else
4046 return gen_rtx_REG (mode, basereg + cum->words + offset);
4050 /* If there is a prototype, then FP values go in a FR register when
4051 named, and in a GR register when unnamed. */
4052 else if (cum->prototype)
4054 if (named)
4055 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4056 /* In big-endian mode, an anonymous SFmode value must be represented
4057 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4058 the value into the high half of the general register. */
4059 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4060 return gen_rtx_PARALLEL (mode,
4061 gen_rtvec (1,
4062 gen_rtx_EXPR_LIST (VOIDmode,
4063 gen_rtx_REG (DImode, basereg + cum->words + offset),
4064 const0_rtx)));
4065 else
4066 return gen_rtx_REG (mode, basereg + cum->words + offset);
4068 /* If there is no prototype, then FP values go in both FR and GR
4069 registers. */
4070 else
4072 /* See comment above. */
4073 enum machine_mode inner_mode =
4074 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4076 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4077 gen_rtx_REG (mode, (FR_ARG_FIRST
4078 + cum->fp_regs)),
4079 const0_rtx);
4080 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4081 gen_rtx_REG (inner_mode,
4082 (basereg + cum->words
4083 + offset)),
4084 const0_rtx);
4086 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
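/* Sketch of the rtl built above for a named, prototyped HFA of three
   floats when all argument registers are still free:

     (parallel:BLK [(expr_list (reg:SF f8)  (const_int 0))
                    (expr_list (reg:SF f9)  (const_int 4))
                    (expr_list (reg:SF f10) (const_int 8))])

   one SFmode FR register per element at increasing byte offsets.  This
   is an illustration, not dumped compiler output.  */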
4090 /* Return number of bytes, at the beginning of the argument, that must be
4091 put in registers. 0 if the argument is entirely in registers or entirely
4092 in memory. */
4094 static int
4095 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4096 tree type, bool named ATTRIBUTE_UNUSED)
4098 int words = ia64_function_arg_words (type, mode);
4099 int offset = ia64_function_arg_offset (cum, type, words);
4101 /* If all argument slots are used, then it must go on the stack. */
4102 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4103 return 0;
4105 /* It doesn't matter whether the argument goes in FR or GR regs. If
4106 it fits within the 8 argument slots, then it goes entirely in
4107 registers. If it extends past the last argument slot, then the rest
4108 goes on the stack. */
4110 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4111 return 0;
4113 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
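/* Worked example: with 6 of the 8 slots already used and a 32-byte
   (4-word) aggregate to pass, 2 words still fit in registers, so this
   returns 16; the remaining 16 bytes go on the stack.  */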
4116 /* Update CUM to point after this argument. This is patterned after
4117 ia64_function_arg. */
4119 void
4120 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4121 tree type, int named)
4123 int words = ia64_function_arg_words (type, mode);
4124 int offset = ia64_function_arg_offset (cum, type, words);
4125 enum machine_mode hfa_mode = VOIDmode;
4127 /* If all arg slots are already full, then there is nothing to do. */
4128 if (cum->words >= MAX_ARGUMENT_SLOTS)
4129 return;
4131 cum->words += words + offset;
4133 /* Check for and handle homogeneous FP aggregates. */
4134 if (type)
4135 hfa_mode = hfa_element_mode (type, 0);
4137 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4138 and unprototyped hfas are passed specially. */
4139 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4141 int fp_regs = cum->fp_regs;
4142 /* This is the original value of cum->words + offset. */
4143 int int_regs = cum->words - words;
4144 int hfa_size = GET_MODE_SIZE (hfa_mode);
4145 int byte_size;
4146 int args_byte_size;
4148 /* If prototyped, pass it in FR regs then GR regs.
4149 If not prototyped, pass it in both FR and GR regs.
4151 If this is an SFmode aggregate, then it is possible to run out of
4152 FR regs while GR regs are still left. In that case, we pass the
4153 remaining part in the GR regs. */
4155 /* Fill the FP regs. We do this always. We stop if we reach the end
4156 of the argument, the last FP register, or the last argument slot. */
4158 byte_size = ((mode == BLKmode)
4159 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4160 args_byte_size = int_regs * UNITS_PER_WORD;
4161 offset = 0;
4162 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4163 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4165 offset += hfa_size;
4166 args_byte_size += hfa_size;
4167 fp_regs++;
4170 cum->fp_regs = fp_regs;
4173 /* Integral and aggregates go in general registers. So do TFmode FP values.
4174 If we have run out of FR registers, then other FP values must also go in
4175 general registers. This can happen when we have a SFmode HFA. */
4176 else if (mode == TFmode || mode == TCmode
4177 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4178 cum->int_regs = cum->words;
4180 /* If there is a prototype, then FP values go in a FR register when
4181 named, and in a GR register when unnamed. */
4182 else if (cum->prototype)
4184 if (! named)
4185 cum->int_regs = cum->words;
4186 else
4187 /* ??? Complex types should not reach here. */
4188 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4190 /* If there is no prototype, then FP values go in both FR and GR
4191 registers. */
4192 else
4194 /* ??? Complex types should not reach here. */
4195 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4196 cum->int_regs = cum->words;
4200 /* Arguments with alignment larger than 8 bytes start at the next even
4201 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4202 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4205 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4208 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4209 return PARM_BOUNDARY * 2;
4211 if (type)
4213 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4214 return PARM_BOUNDARY * 2;
4215 else
4216 return PARM_BOUNDARY;
4219 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4220 return PARM_BOUNDARY * 2;
4221 else
4222 return PARM_BOUNDARY;
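/* Rough examples (PARM_BOUNDARY is 64 bits here): a 16-byte-aligned
   struct, or a TFmode argument on ILP32 HP-UX, gets a 128-bit
   boundary; an ordinary long or double stays at 64 bits.  */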
4225 /* True if it is OK to do sibling call optimization for the specified
4226 call expression EXP. DECL will be the called function, or NULL if
4227 this is an indirect call. */
4228 static bool
4229 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4231 /* We can't perform a sibcall if the current function has the syscall_linkage
4232 attribute. */
4233 if (lookup_attribute ("syscall_linkage",
4234 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4235 return false;
4237 /* We must always return with our current GP. This means we can
4238 only sibcall to functions defined in the current module. */
4239 return decl && (*targetm.binds_local_p) (decl);
4243 /* Implement va_arg. */
4245 static tree
4246 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4248 /* Variable sized types are passed by reference. */
4249 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4251 tree ptrtype = build_pointer_type (type);
4252 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4253 return build_va_arg_indirect_ref (addr);
4256 /* Aggregate arguments with alignment larger than 8 bytes start at
4257 the next even boundary. Integer and floating point arguments
4258 do so if they are larger than 8 bytes, whether or not their
4259 alignment is also larger than 8 bytes.
4260 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4261 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4263 tree t = build2 (PLUS_EXPR, TREE_TYPE (valist), valist,
4264 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
4265 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4266 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
4267 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
4268 gimplify_and_add (t, pre_p);
4271 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
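/* The statements appended above amount to rounding the va_list pointer
   up to a 16-byte boundary, roughly

     valist = (valist + 2*UNITS_PER_WORD - 1) & -(2*UNITS_PER_WORD);

   in illustrative pseudo-C, before the standard expansion runs.  */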
4274 /* Return 1 if the function return value is returned in memory. Return 0 if it is
4275 in a register. */
4277 static bool
4278 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
4280 enum machine_mode mode;
4281 enum machine_mode hfa_mode;
4282 HOST_WIDE_INT byte_size;
4284 mode = TYPE_MODE (valtype);
4285 byte_size = GET_MODE_SIZE (mode);
4286 if (mode == BLKmode)
4288 byte_size = int_size_in_bytes (valtype);
4289 if (byte_size < 0)
4290 return true;
4293 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4295 hfa_mode = hfa_element_mode (valtype, 0);
4296 if (hfa_mode != VOIDmode)
4298 int hfa_size = GET_MODE_SIZE (hfa_mode);
4300 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4301 return true;
4302 else
4303 return false;
4305 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4306 return true;
4307 else
4308 return false;
4311 /* Return rtx for register that holds the function return value. */
4314 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4316 enum machine_mode mode;
4317 enum machine_mode hfa_mode;
4319 mode = TYPE_MODE (valtype);
4320 hfa_mode = hfa_element_mode (valtype, 0);
4322 if (hfa_mode != VOIDmode)
4324 rtx loc[8];
4325 int i;
4326 int hfa_size;
4327 int byte_size;
4328 int offset;
4330 hfa_size = GET_MODE_SIZE (hfa_mode);
4331 byte_size = ((mode == BLKmode)
4332 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4333 offset = 0;
4334 for (i = 0; offset < byte_size; i++)
4336 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4337 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4338 GEN_INT (offset));
4339 offset += hfa_size;
4341 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4343 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4344 return gen_rtx_REG (mode, FR_ARG_FIRST);
4345 else
4347 bool need_parallel = false;
4349 /* In big-endian mode, we need to manage the layout of aggregates
4350 in the registers so that we get the bits properly aligned in
4351 the highpart of the registers. */
4352 if (BYTES_BIG_ENDIAN
4353 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4354 need_parallel = true;
4356 /* Something like struct S { long double x; char a[0]; } is not an
4357 HFA structure, and therefore doesn't go in fp registers. But
4358 the middle-end will give it XFmode anyway, and XFmode values
4359 don't normally fit in integer registers. So we need to smuggle
4360 the value inside a parallel. */
4361 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4362 need_parallel = true;
4364 if (need_parallel)
4366 rtx loc[8];
4367 int offset;
4368 int bytesize;
4369 int i;
4371 offset = 0;
4372 bytesize = int_size_in_bytes (valtype);
4373 /* An empty PARALLEL is invalid here, but the return value
4374 doesn't matter for empty structs. */
4375 if (bytesize == 0)
4376 return gen_rtx_REG (mode, GR_RET_FIRST);
4377 for (i = 0; offset < bytesize; i++)
4379 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4380 gen_rtx_REG (DImode,
4381 GR_RET_FIRST + i),
4382 GEN_INT (offset));
4383 offset += UNITS_PER_WORD;
4385 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4388 return gen_rtx_REG (mode, GR_RET_FIRST);
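/* Sketches of the cases above (not dumped rtl): an HFA of four doubles
   comes back as a PARALLEL of DFmode registers f8-f11; a plain double
   comes back in f8; a small non-HFA aggregate in big-endian mode is
   wrapped in a PARALLEL of DImode registers starting at r8 so its
   bytes land in the proper half; everything else uses r8 directly.  */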
4392 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4393 We need to emit DTP-relative relocations. */
4395 static void
4396 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4398 gcc_assert (size == 4 || size == 8);
4399 if (size == 4)
4400 fputs ("\tdata4.ua\t@dtprel(", file);
4401 else
4402 fputs ("\tdata8.ua\t@dtprel(", file);
4403 output_addr_const (file, x);
4404 fputs (")", file);
4407 /* Print a memory address as an operand to reference that memory location. */
4409 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4410 also call this from ia64_print_operand for memory addresses. */
4412 void
4413 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4414 rtx address ATTRIBUTE_UNUSED)
4418 /* Print an operand to an assembler instruction.
4419 C Swap and print a comparison operator.
4420 D Print an FP comparison operator.
4421 E Print 32 - constant, for SImode shifts as extract.
4422 e Print 64 - constant, for DImode rotates.
4423 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4424 a floating point register emitted normally.
4425 I Invert a predicate register by adding 1.
4426 J Select the proper predicate register for a condition.
4427 j Select the inverse predicate register for a condition.
4428 O Append .acq for volatile load.
4429 P Postincrement of a MEM.
4430 Q Append .rel for volatile store.
4431 S Shift amount for shladd instruction.
4432 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4433 for Intel assembler.
4434 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4435 for Intel assembler.
4436 X A pair of floating point registers.
4437 r Print register name, or constant 0 as r0. HP compatibility for
4438 Linux kernel.
4439 v Print vector constant value as an 8-byte integer value. */
4441 void
4442 ia64_print_operand (FILE * file, rtx x, int code)
4444 const char *str;
4446 switch (code)
4448 case 0:
4449 /* Handled below. */
4450 break;
4452 case 'C':
4454 enum rtx_code c = swap_condition (GET_CODE (x));
4455 fputs (GET_RTX_NAME (c), file);
4456 return;
4459 case 'D':
4460 switch (GET_CODE (x))
4462 case NE:
4463 str = "neq";
4464 break;
4465 case UNORDERED:
4466 str = "unord";
4467 break;
4468 case ORDERED:
4469 str = "ord";
4470 break;
4471 default:
4472 str = GET_RTX_NAME (GET_CODE (x));
4473 break;
4475 fputs (str, file);
4476 return;
4478 case 'E':
4479 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4480 return;
4482 case 'e':
4483 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4484 return;
4486 case 'F':
4487 if (x == CONST0_RTX (GET_MODE (x)))
4488 str = reg_names [FR_REG (0)];
4489 else if (x == CONST1_RTX (GET_MODE (x)))
4490 str = reg_names [FR_REG (1)];
4491 else
4493 gcc_assert (GET_CODE (x) == REG);
4494 str = reg_names [REGNO (x)];
4496 fputs (str, file);
4497 return;
4499 case 'I':
4500 fputs (reg_names [REGNO (x) + 1], file);
4501 return;
4503 case 'J':
4504 case 'j':
4506 unsigned int regno = REGNO (XEXP (x, 0));
4507 if (GET_CODE (x) == EQ)
4508 regno += 1;
4509 if (code == 'j')
4510 regno ^= 1;
4511 fputs (reg_names [regno], file);
4513 return;
4515 case 'O':
4516 if (MEM_VOLATILE_P (x))
4517 fputs(".acq", file);
4518 return;
4520 case 'P':
4522 HOST_WIDE_INT value;
4524 switch (GET_CODE (XEXP (x, 0)))
4526 default:
4527 return;
4529 case POST_MODIFY:
4530 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4531 if (GET_CODE (x) == CONST_INT)
4532 value = INTVAL (x);
4533 else
4535 gcc_assert (GET_CODE (x) == REG);
4536 fprintf (file, ", %s", reg_names[REGNO (x)]);
4537 return;
4539 break;
4541 case POST_INC:
4542 value = GET_MODE_SIZE (GET_MODE (x));
4543 break;
4545 case POST_DEC:
4546 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4547 break;
4550 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4551 return;
4554 case 'Q':
4555 if (MEM_VOLATILE_P (x))
4556 fputs(".rel", file);
4557 return;
4559 case 'S':
4560 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4561 return;
4563 case 'T':
4564 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4566 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4567 return;
4569 break;
4571 case 'U':
4572 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4574 const char *prefix = "0x";
4575 if (INTVAL (x) & 0x80000000)
4577 fprintf (file, "0xffffffff");
4578 prefix = "";
4580 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4581 return;
4583 break;
4585 case 'X':
4587 unsigned int regno = REGNO (x);
4588 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4590 return;
4592 case 'r':
4593 /* If this operand is the constant zero, write it as register zero.
4594 Any register, zero, or CONST_INT value is OK here. */
4595 if (GET_CODE (x) == REG)
4596 fputs (reg_names[REGNO (x)], file);
4597 else if (x == CONST0_RTX (GET_MODE (x)))
4598 fputs ("r0", file);
4599 else if (GET_CODE (x) == CONST_INT)
4600 output_addr_const (file, x);
4601 else
4602 output_operand_lossage ("invalid %%r value");
4603 return;
4605 case 'v':
4606 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4607 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4608 break;
4610 case '+':
4612 const char *which;
4614 /* For conditional branches, returns or calls, substitute
4615 sptk, dptk, dpnt, or spnt for %s. */
4616 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4617 if (x)
4619 int pred_val = INTVAL (XEXP (x, 0));
4621 /* Guess top and bottom 2% statically predicted. */
4622 if (pred_val < REG_BR_PROB_BASE / 50)
4623 which = ".spnt";
4624 else if (pred_val < REG_BR_PROB_BASE / 2)
4625 which = ".dpnt";
4626 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4627 which = ".dptk";
4628 else
4629 which = ".sptk";
4631 else if (GET_CODE (current_output_insn) == CALL_INSN)
4632 which = ".sptk";
4633 else
4634 which = ".dptk";
4636 fputs (which, file);
4637 return;
4640 case ',':
4641 x = current_insn_predicate;
4642 if (x)
4644 unsigned int regno = REGNO (XEXP (x, 0));
4645 if (GET_CODE (x) == EQ)
4646 regno += 1;
4647 fprintf (file, "(%s) ", reg_names [regno]);
4649 return;
4651 default:
4652 output_operand_lossage ("ia64_print_operand: unknown code");
4653 return;
4656 switch (GET_CODE (x))
4658 /* This happens for the spill/restore instructions. */
4659 case POST_INC:
4660 case POST_DEC:
4661 case POST_MODIFY:
4662 x = XEXP (x, 0);
4663 /* ... fall through ... */
4665 case REG:
4666 fputs (reg_names [REGNO (x)], file);
4667 break;
4669 case MEM:
4671 rtx addr = XEXP (x, 0);
4672 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4673 addr = XEXP (addr, 0);
4674 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4675 break;
4678 default:
4679 output_addr_const (file, x);
4680 break;
4683 return;
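/* A few illustrative expansions of the codes above, for hypothetical
   operands (not captured output):

     %P on (mem:DI (post_inc (reg r14)))   prints ", 8"
     %S on (const_int 8)                   prints "3"
     %r on (reg r14)                       prints "r14"            */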
4686 /* Compute a (partial) cost for rtx X. Return true if the complete
4687 cost has been computed, and false if subexpressions should be
4688 scanned. In either case, *TOTAL contains the cost result. */
4689 /* ??? This is incomplete. */
4691 static bool
4692 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4694 switch (code)
4696 case CONST_INT:
4697 switch (outer_code)
4699 case SET:
4700 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4701 return true;
4702 case PLUS:
4703 if (CONST_OK_FOR_I (INTVAL (x)))
4704 *total = 0;
4705 else if (CONST_OK_FOR_J (INTVAL (x)))
4706 *total = 1;
4707 else
4708 *total = COSTS_N_INSNS (1);
4709 return true;
4710 default:
4711 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4712 *total = 0;
4713 else
4714 *total = COSTS_N_INSNS (1);
4715 return true;
4718 case CONST_DOUBLE:
4719 *total = COSTS_N_INSNS (1);
4720 return true;
4722 case CONST:
4723 case SYMBOL_REF:
4724 case LABEL_REF:
4725 *total = COSTS_N_INSNS (3);
4726 return true;
4728 case MULT:
4729 /* For multiplies wider than HImode, we have to go to the FPU,
4730 which normally involves copies. Plus there's the latency
4731 of the multiply itself, and the latency of the instructions to
4732 transfer integer regs to FP regs. */
4733 /* ??? Check for FP mode. */
4734 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4735 *total = COSTS_N_INSNS (10);
4736 else
4737 *total = COSTS_N_INSNS (2);
4738 return true;
4740 case PLUS:
4741 case MINUS:
4742 case ASHIFT:
4743 case ASHIFTRT:
4744 case LSHIFTRT:
4745 *total = COSTS_N_INSNS (1);
4746 return true;
4748 case DIV:
4749 case UDIV:
4750 case MOD:
4751 case UMOD:
4752 /* We make divide expensive, so that divide-by-constant will be
4753 optimized to a multiply. */
4754 *total = COSTS_N_INSNS (60);
4755 return true;
4757 default:
4758 return false;
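/* Rough worked example of the costs above: a DImode multiply is costed
   at 10 insns (it goes through the FP unit), an add or shift at 1, and
   any integer division at 60 so that division by a constant is turned
   into a multiply.  */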
4762 /* Calculate the cost of moving data from a register in class FROM to
4763 one in class TO, using MODE. */
4766 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4767 enum reg_class to)
4769 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4770 if (to == ADDL_REGS)
4771 to = GR_REGS;
4772 if (from == ADDL_REGS)
4773 from = GR_REGS;
4775 /* All costs are symmetric, so reduce cases by putting the
4776 lower number class as the destination. */
4777 if (from < to)
4779 enum reg_class tmp = to;
4780 to = from, from = tmp;
4783 /* Moving between FR and GR registers in XFmode must be more expensive than 2,
4784 so that we get secondary memory reloads. Between FR_REGS,
4785 we have to make this at least as expensive as MEMORY_MOVE_COST
4786 to avoid spectacularly poor register class preferencing. */
4787 if (mode == XFmode || mode == RFmode)
4789 if (to != GR_REGS || from != GR_REGS)
4790 return MEMORY_MOVE_COST (mode, to, 0);
4791 else
4792 return 3;
4795 switch (to)
4797 case PR_REGS:
4798 /* Moving between PR registers takes two insns. */
4799 if (from == PR_REGS)
4800 return 3;
4801 /* Moving between PR and anything but GR is impossible. */
4802 if (from != GR_REGS)
4803 return MEMORY_MOVE_COST (mode, to, 0);
4804 break;
4806 case BR_REGS:
4807 /* Moving between BR and anything but GR is impossible. */
4808 if (from != GR_REGS && from != GR_AND_BR_REGS)
4809 return MEMORY_MOVE_COST (mode, to, 0);
4810 break;
4812 case AR_I_REGS:
4813 case AR_M_REGS:
4814 /* Moving between AR and anything but GR is impossible. */
4815 if (from != GR_REGS)
4816 return MEMORY_MOVE_COST (mode, to, 0);
4817 break;
4819 case GR_REGS:
4820 case FR_REGS:
4821 case FP_REGS:
4822 case GR_AND_FR_REGS:
4823 case GR_AND_BR_REGS:
4824 case ALL_REGS:
4825 break;
4827 default:
4828 gcc_unreachable ();
4831 return 2;
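/* Illustrative results: GR<->GR costs 2, PR<->PR costs 3, BR or AR
   to/from anything but GR falls back to MEMORY_MOVE_COST, and FR<->GR
   in XFmode is likewise made as expensive as a memory move so that
   reload goes through a stack slot.  */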
4834 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4835 to use when copying X into that class. */
4837 enum reg_class
4838 ia64_preferred_reload_class (rtx x, enum reg_class class)
4840 switch (class)
4842 case FR_REGS:
4843 case FP_REGS:
4844 /* Don't allow volatile mem reloads into floating point registers.
4845 This is defined to force reload to choose the r/m case instead
4846 of the f/f case when reloading (set (reg fX) (mem/v)). */
4847 if (MEM_P (x) && MEM_VOLATILE_P (x))
4848 return NO_REGS;
4850 /* Force all unrecognized constants into the constant pool. */
4851 if (CONSTANT_P (x))
4852 return NO_REGS;
4853 break;
4855 case AR_M_REGS:
4856 case AR_I_REGS:
4857 if (!OBJECT_P (x))
4858 return NO_REGS;
4859 break;
4861 default:
4862 break;
4865 return class;
4868 /* This function returns the register class required for a secondary
4869 register when copying between one of the registers in CLASS, and X,
4870 using MODE. A return value of NO_REGS means that no secondary register
4871 is required. */
4873 enum reg_class
4874 ia64_secondary_reload_class (enum reg_class class,
4875 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4877 int regno = -1;
4879 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4880 regno = true_regnum (x);
4882 switch (class)
4884 case BR_REGS:
4885 case AR_M_REGS:
4886 case AR_I_REGS:
4887 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4888 interaction. We end up with two pseudos with overlapping lifetimes
4889 both of which are equiv to the same constant, and both which need
4890 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4891 changes depending on the path length, which means the qty_first_reg
4892 check in make_regs_eqv can give different answers at different times.
4893 At some point I'll probably need a reload_indi pattern to handle
4894 this.
4896 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4897 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4898 non-general registers for good measure. */
4899 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4900 return GR_REGS;
4902 /* This is needed if a pseudo used as a call_operand gets spilled to a
4903 stack slot. */
4904 if (GET_CODE (x) == MEM)
4905 return GR_REGS;
4906 break;
4908 case FR_REGS:
4909 case FP_REGS:
4910 /* Need to go through general registers to get to other class regs. */
4911 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4912 return GR_REGS;
4914 /* This can happen when a paradoxical subreg is an operand to the
4915 muldi3 pattern. */
4916 /* ??? This shouldn't be necessary after instruction scheduling is
4917 enabled, because paradoxical subregs are not accepted by
4918 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4919 stop the paradoxical subreg stupidity in the *_operand functions
4920 in recog.c. */
4921 if (GET_CODE (x) == MEM
4922 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4923 || GET_MODE (x) == QImode))
4924 return GR_REGS;
4926 /* This can happen because of the ior/and/etc patterns that accept FP
4927 registers as operands. If the third operand is a constant, then it
4928 needs to be reloaded into a FP register. */
4929 if (GET_CODE (x) == CONST_INT)
4930 return GR_REGS;
4932 /* This can happen because of register elimination in a muldi3 insn.
4933 E.g. `26107 * (unsigned long)&u'. */
4934 if (GET_CODE (x) == PLUS)
4935 return GR_REGS;
4936 break;
4938 case PR_REGS:
4939 /* ??? This happens if we cse/gcse a BImode value across a call,
4940 and the function has a nonlocal goto. This is because global
4941 does not allocate call crossing pseudos to hard registers when
4942 current_function_has_nonlocal_goto is true. This is relatively
4943 common for C++ programs that use exceptions. To reproduce,
4944 return NO_REGS and compile libstdc++. */
4945 if (GET_CODE (x) == MEM)
4946 return GR_REGS;
4948 /* This can happen when we take a BImode subreg of a DImode value,
4949 and that DImode value winds up in some non-GR register. */
4950 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4951 return GR_REGS;
4952 break;
4954 default:
4955 break;
4958 return NO_REGS;
4962 /* Emit text to declare externally defined variables and functions, because
4963 the Intel assembler does not support undefined externals. */
4965 void
4966 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4968 int save_referenced;
4970 /* GNU as does not need anything here, but the HP linker does need
4971 something for external functions. */
4973 if (TARGET_GNU_AS
4974 && (!TARGET_HPUX_LD
4975 || TREE_CODE (decl) != FUNCTION_DECL
4976 || strstr (name, "__builtin_") == name))
4977 return;
4979 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4980 the linker when we do this, so we need to be careful not to do this for
4981 builtin functions which have no library equivalent. Unfortunately, we
4982 can't tell here whether or not a function will actually be called by
4983 expand_expr, so we pull in library functions even if we may not need
4984 them later. */
4985 if (! strcmp (name, "__builtin_next_arg")
4986 || ! strcmp (name, "alloca")
4987 || ! strcmp (name, "__builtin_constant_p")
4988 || ! strcmp (name, "__builtin_args_info"))
4989 return;
4991 if (TARGET_HPUX_LD)
4992 ia64_hpux_add_extern_decl (decl);
4993 else
4995 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4996 restore it. */
4997 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4998 if (TREE_CODE (decl) == FUNCTION_DECL)
4999 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
5000 (*targetm.asm_out.globalize_label) (file, name);
5001 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
5005 /* Parse the -mfixed-range= option string. */
5007 static void
5008 fix_range (const char *const_str)
5010 int i, first, last;
5011 char *str, *dash, *comma;
5013 /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
5014 REG2 are either register names or register numbers. The effect
5015 of this option is to mark the registers in the range from REG1 to
5016 REG2 as ``fixed'' so they won't be used by the compiler. This is
5017 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5019 i = strlen (const_str);
5020 str = (char *) alloca (i + 1);
5021 memcpy (str, const_str, i + 1);
5023 while (1)
5025 dash = strchr (str, '-');
5026 if (!dash)
5028 warning (0, "value of -mfixed-range must have form REG1-REG2");
5029 return;
5031 *dash = '\0';
5033 comma = strchr (dash + 1, ',');
5034 if (comma)
5035 *comma = '\0';
5037 first = decode_reg_name (str);
5038 if (first < 0)
5040 warning (0, "unknown register name: %s", str);
5041 return;
5044 last = decode_reg_name (dash + 1);
5045 if (last < 0)
5047 warning (0, "unknown register name: %s", dash + 1);
5048 return;
5051 *dash = '-';
5053 if (first > last)
5055 warning (0, "%s-%s is an empty range", str, dash + 1);
5056 return;
5059 for (i = first; i <= last; ++i)
5060 fixed_regs[i] = call_used_regs[i] = 1;
5062 if (!comma)
5063 break;
5065 *comma = ',';
5066 str = comma + 1;
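/* Usage sketch: -mfixed-range=f32-f127 marks f32 through f127 as fixed
   and call-used; several ranges may be given, as in
   -mfixed-range=f12-f15,f32-f127.  */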
5070 /* Implement TARGET_HANDLE_OPTION. */
5072 static bool
5073 ia64_handle_option (size_t code, const char *arg, int value)
5075 switch (code)
5077 case OPT_mfixed_range_:
5078 fix_range (arg);
5079 return true;
5081 case OPT_mtls_size_:
5082 if (value != 14 && value != 22 && value != 64)
5083 error ("bad value %<%s%> for -mtls-size= switch", arg);
5084 return true;
5086 case OPT_mtune_:
5088 static struct pta
5090 const char *name; /* processor name or nickname. */
5091 enum processor_type processor;
5093 const processor_alias_table[] =
5095 {"itanium", PROCESSOR_ITANIUM},
5096 {"itanium1", PROCESSOR_ITANIUM},
5097 {"merced", PROCESSOR_ITANIUM},
5098 {"itanium2", PROCESSOR_ITANIUM2},
5099 {"mckinley", PROCESSOR_ITANIUM2},
5101 int const pta_size = ARRAY_SIZE (processor_alias_table);
5102 int i;
5104 for (i = 0; i < pta_size; i++)
5105 if (!strcmp (arg, processor_alias_table[i].name))
5107 ia64_tune = processor_alias_table[i].processor;
5108 break;
5110 if (i == pta_size)
5111 error ("bad value %<%s%> for -mtune= switch", arg);
5112 return true;
5115 default:
5116 return true;
5120 /* Implement OVERRIDE_OPTIONS. */
5122 void
5123 ia64_override_options (void)
5125 if (TARGET_AUTO_PIC)
5126 target_flags |= MASK_CONST_GP;
5128 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5130 warning (0, "not yet implemented: latency-optimized inline square root");
5131 TARGET_INLINE_SQRT = INL_MAX_THR;
5134 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5135 flag_schedule_insns_after_reload = 0;
5137 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5139 init_machine_status = ia64_init_machine_status;
5142 static struct machine_function *
5143 ia64_init_machine_status (void)
5145 return ggc_alloc_cleared (sizeof (struct machine_function));
5148 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5149 static enum attr_type ia64_safe_type (rtx);
5151 static enum attr_itanium_class
5152 ia64_safe_itanium_class (rtx insn)
5154 if (recog_memoized (insn) >= 0)
5155 return get_attr_itanium_class (insn);
5156 else
5157 return ITANIUM_CLASS_UNKNOWN;
5160 static enum attr_type
5161 ia64_safe_type (rtx insn)
5163 if (recog_memoized (insn) >= 0)
5164 return get_attr_type (insn);
5165 else
5166 return TYPE_UNKNOWN;
5169 /* The following collection of routines emits instruction group stop bits as
5170 necessary to avoid dependencies. */
5172 /* Need to track some additional registers as far as serialization is
5173 concerned so we can properly handle br.call and br.ret. We could
5174 make these registers visible to gcc, but since these registers are
5175 never explicitly used in gcc generated code, it seems wasteful to
5176 do so (plus it would make the call and return patterns needlessly
5177 complex). */
5178 #define REG_RP (BR_REG (0))
5179 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5180 /* This is used for volatile asms which may require a stop bit immediately
5181 before and after them. */
5182 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5183 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5184 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5186 /* For each register, we keep track of how it has been written in the
5187 current instruction group.
5189 If a register is written unconditionally (no qualifying predicate),
5190 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5192 If a register is written if its qualifying predicate P is true, we
5193 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5194 may be written again by the complement of P (P^1) and when this happens,
5195 WRITE_COUNT gets set to 2.
5197 The result of this is that whenever an insn attempts to write a register
5198 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5200 If a predicate register is written by a floating-point insn, we set
5201 WRITTEN_BY_FP to true.
5203 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5204 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
5206 struct reg_write_state
5208 unsigned int write_count : 2;
5209 unsigned int first_pred : 16;
5210 unsigned int written_by_fp : 1;
5211 unsigned int written_by_and : 1;
5212 unsigned int written_by_or : 1;
5215 /* Cumulative info for the current instruction group. */
5216 struct reg_write_state rws_sum[NUM_REGS];
5217 /* Info for the current instruction. This gets copied to rws_sum after a
5218 stop bit is emitted. */
5219 struct reg_write_state rws_insn[NUM_REGS];
5221 /* Indicates whether this is the first instruction after a stop bit,
5222 in which case we don't need another stop bit. Without this,
5223 ia64_variable_issue will die when scheduling an alloc. */
5224 static int first_instruction;
5226 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5227 RTL for one instruction. */
5228 struct reg_flags
5230 unsigned int is_write : 1; /* Is register being written? */
5231 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5232 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5233 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5234 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5235 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5238 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
5239 static int rws_access_regno (int, struct reg_flags, int);
5240 static int rws_access_reg (rtx, struct reg_flags, int);
5241 static void update_set_flags (rtx, struct reg_flags *);
5242 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5243 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5244 static void init_insn_group_barriers (void);
5245 static int group_barrier_needed (rtx);
5246 static int safe_group_barrier_needed (rtx);
5248 /* Update *RWS for REGNO, which is being written by the current instruction,
5249 with predicate PRED, and associated register flags in FLAGS. */
5251 static void
5252 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
5254 if (pred)
5255 rws[regno].write_count++;
5256 else
5257 rws[regno].write_count = 2;
5258 rws[regno].written_by_fp |= flags.is_fp;
5259 /* ??? Not tracking and/or across differing predicates. */
5260 rws[regno].written_by_and = flags.is_and;
5261 rws[regno].written_by_or = flags.is_or;
5262 rws[regno].first_pred = pred;
5265 /* Handle an access to register REGNO of type FLAGS using predicate register
5266 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5267 a dependency with an earlier instruction in the same group. */
5269 static int
5270 rws_access_regno (int regno, struct reg_flags flags, int pred)
5272 int need_barrier = 0;
5274 gcc_assert (regno < NUM_REGS);
5276 if (! PR_REGNO_P (regno))
5277 flags.is_and = flags.is_or = 0;
5279 if (flags.is_write)
5281 int write_count;
5283 /* One insn writes same reg multiple times? */
5284 gcc_assert (!rws_insn[regno].write_count);
5286 /* Update info for current instruction. */
5287 rws_update (rws_insn, regno, flags, pred);
5288 write_count = rws_sum[regno].write_count;
5290 switch (write_count)
5292 case 0:
5293 /* The register has not been written yet. */
5294 rws_update (rws_sum, regno, flags, pred);
5295 break;
5297 case 1:
5298 /* The register has been written via a predicate. If this is
5299 not a complementary predicate, then we need a barrier. */
5300 /* ??? This assumes that P and P+1 are always complementary
5301 predicates for P even. */
5302 if (flags.is_and && rws_sum[regno].written_by_and)
5304 else if (flags.is_or && rws_sum[regno].written_by_or)
5306 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5307 need_barrier = 1;
5308 rws_update (rws_sum, regno, flags, pred);
5309 break;
5311 case 2:
5312 /* The register has been unconditionally written already. We
5313 need a barrier. */
5314 if (flags.is_and && rws_sum[regno].written_by_and)
5316 else if (flags.is_or && rws_sum[regno].written_by_or)
5318 else
5319 need_barrier = 1;
5320 rws_sum[regno].written_by_and = flags.is_and;
5321 rws_sum[regno].written_by_or = flags.is_or;
5322 break;
5324 default:
5325 gcc_unreachable ();
5328 else
5330 if (flags.is_branch)
5332 /* Branches have several RAW exceptions that allow us to avoid
5333 barriers. */
5335 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5336 /* RAW dependencies on branch regs are permissible as long
5337 as the writer is a non-branch instruction. Since we
5338 never generate code that uses a branch register written
5339 by a branch instruction, handling this case is
5340 easy. */
5341 return 0;
5343 if (REGNO_REG_CLASS (regno) == PR_REGS
5344 && ! rws_sum[regno].written_by_fp)
5345 /* The predicates of a branch are available within the
5346 same insn group as long as the predicate was written by
5347 something other than a floating-point instruction. */
5348 return 0;
5351 if (flags.is_and && rws_sum[regno].written_by_and)
5352 return 0;
5353 if (flags.is_or && rws_sum[regno].written_by_or)
5354 return 0;
5356 switch (rws_sum[regno].write_count)
5358 case 0:
5359 /* The register has not been written yet. */
5360 break;
5362 case 1:
5363 /* The register has been written via a predicate. If this is
5364 not a complementary predicate, then we need a barrier. */
5365 /* ??? This assumes that P and P+1 are always complementary
5366 predicates for P even. */
5367 if ((rws_sum[regno].first_pred ^ 1) != pred)
5368 need_barrier = 1;
5369 break;
5371 case 2:
5372 /* The register has been unconditionally written already. We
5373 need a barrier. */
5374 need_barrier = 1;
5375 break;
5377 default:
5378 gcc_unreachable ();
5382 return need_barrier;
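/* Worked example of the rules above (a sketch, not compiler output):

     (p6) mov r14 = r15    // first write: write_count = 1
     (p7) mov r14 = r16    // p7 complements p6: no stop bit needed,
                           // write_count becomes 2
          mov r14 = r17    // another write after count 2: a stop bit
                           // (;;) must be emitted first              */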
5385 static int
5386 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5388 int regno = REGNO (reg);
5389 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5391 if (n == 1)
5392 return rws_access_regno (regno, flags, pred);
5393 else
5395 int need_barrier = 0;
5396 while (--n >= 0)
5397 need_barrier |= rws_access_regno (regno + n, flags, pred);
5398 return need_barrier;
5402 /* Examine X, which is a SET rtx, and update the register flags stored
5403 in *PFLAGS to reflect the kind of operation it performs. */
5405 static void
5406 update_set_flags (rtx x, struct reg_flags *pflags)
5408 rtx src = SET_SRC (x);
5410 switch (GET_CODE (src))
5412 case CALL:
5413 return;
5415 case IF_THEN_ELSE:
5416 /* There are three cases here:
5417 (1) The destination is (pc), in which case this is a branch,
5418 nothing here applies.
5419 (2) The destination is ar.lc, in which case this is a
5420 doloop_end_internal,
5421 (3) The destination is an fp register, in which case this is
5422 an fselect instruction.
5423 In all cases, nothing we do in this function applies. */
5424 return;
5426 default:
5427 if (COMPARISON_P (src)
5428 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5429 /* Set pflags->is_fp to 1 so that we know we're dealing
5430 with a floating point comparison when processing the
5431 destination of the SET. */
5432 pflags->is_fp = 1;
5434 /* Discover if this is a parallel comparison. We only handle
5435 and.orcm and or.andcm at present, since we must retain a
5436 strict inverse on the predicate pair. */
5437 else if (GET_CODE (src) == AND)
5438 pflags->is_and = 1;
5439 else if (GET_CODE (src) == IOR)
5440 pflags->is_or = 1;
5442 break;
5446 /* Subroutine of rtx_needs_barrier; this function determines whether the
5447 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5448 are as in rtx_needs_barrier. */
5451 static int
5452 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5454 int need_barrier = 0;
5455 rtx dst;
5456 rtx src = SET_SRC (x);
5458 if (GET_CODE (src) == CALL)
5459 /* We don't need to worry about the result registers that
5460 get written by subroutine call. */
5461 return rtx_needs_barrier (src, flags, pred);
5462 else if (SET_DEST (x) == pc_rtx)
5464 /* X is a conditional branch. */
5465 /* ??? This seems redundant, as the caller sets this bit for
5466 all JUMP_INSNs. */
5467 flags.is_branch = 1;
5468 return rtx_needs_barrier (src, flags, pred);
5471 need_barrier = rtx_needs_barrier (src, flags, pred);
5473 dst = SET_DEST (x);
5474 if (GET_CODE (dst) == ZERO_EXTRACT)
5476 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5477 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5478 dst = XEXP (dst, 0);
5480 return need_barrier;
5483 /* Handle an access to rtx X of type FLAGS using predicate register
5484 PRED. Return 1 if this access creates a dependency with an earlier
5485 instruction in the same group. */
5487 static int
5488 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5490 int i, j;
5491 int is_complemented = 0;
5492 int need_barrier = 0;
5493 const char *format_ptr;
5494 struct reg_flags new_flags;
5495 rtx cond;
5497 if (! x)
5498 return 0;
5500 new_flags = flags;
5502 switch (GET_CODE (x))
5504 case SET:
5505 update_set_flags (x, &new_flags);
5506 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5507 if (GET_CODE (SET_SRC (x)) != CALL)
5509 new_flags.is_write = 1;
5510 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5512 break;
5514 case CALL:
5515 new_flags.is_write = 0;
5516 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5518 /* Avoid multiple register writes, in case this is a pattern with
5519 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5520 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5522 new_flags.is_write = 1;
5523 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5524 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5525 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5527 break;
5529 case COND_EXEC:
5530 /* X is a predicated instruction. */
5532 cond = COND_EXEC_TEST (x);
5533 gcc_assert (!pred);
5534 need_barrier = rtx_needs_barrier (cond, flags, 0);
5536 if (GET_CODE (cond) == EQ)
5537 is_complemented = 1;
5538 cond = XEXP (cond, 0);
5539 gcc_assert (GET_CODE (cond) == REG
5540 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5541 pred = REGNO (cond);
5542 if (is_complemented)
5543 ++pred;
5545 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5546 return need_barrier;
5548 case CLOBBER:
5549 case USE:
5550 /* Clobber & use are for earlier compiler-phases only. */
5551 break;
5553 case ASM_OPERANDS:
5554 case ASM_INPUT:
5555 /* We always emit stop bits for traditional asms. We emit stop bits
5556 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5557 if (GET_CODE (x) != ASM_OPERANDS
5558 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5560 /* Avoid writing the register multiple times if we have multiple
5561 asm outputs. This avoids a failure in rws_access_reg. */
5562 if (! rws_insn[REG_VOLATILE].write_count)
5564 new_flags.is_write = 1;
5565 rws_access_regno (REG_VOLATILE, new_flags, pred);
5567 return 1;
5570 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5571 We cannot just fall through here since then we would be confused
5572 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5573 a traditional asm, unlike its normal usage. */
5575 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5576 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5577 need_barrier = 1;
5578 break;
5580 case PARALLEL:
5581 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5583 rtx pat = XVECEXP (x, 0, i);
5584 switch (GET_CODE (pat))
5586 case SET:
5587 update_set_flags (pat, &new_flags);
5588 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5589 break;
5591 case USE:
5592 case CALL:
5593 case ASM_OPERANDS:
5594 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5595 break;
5597 case CLOBBER:
5598 case RETURN:
5599 break;
5601 default:
5602 gcc_unreachable ();
5605 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5607 rtx pat = XVECEXP (x, 0, i);
5608 if (GET_CODE (pat) == SET)
5610 if (GET_CODE (SET_SRC (pat)) != CALL)
5612 new_flags.is_write = 1;
5613 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5614 pred);
5617 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5618 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5620 break;
5622 case SUBREG:
5623 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5624 break;
5625 case REG:
5626 if (REGNO (x) == AR_UNAT_REGNUM)
5628 for (i = 0; i < 64; ++i)
5629 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5631 else
5632 need_barrier = rws_access_reg (x, flags, pred);
5633 break;
5635 case MEM:
5636 /* Find the regs used in memory address computation. */
5637 new_flags.is_write = 0;
5638 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5639 break;
5641 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5642 case SYMBOL_REF: case LABEL_REF: case CONST:
5643 break;
5645 /* Operators with side-effects. */
5646 case POST_INC: case POST_DEC:
5647 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5649 new_flags.is_write = 0;
5650 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5651 new_flags.is_write = 1;
5652 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5653 break;
5655 case POST_MODIFY:
5656 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5658 new_flags.is_write = 0;
5659 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5660 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5661 new_flags.is_write = 1;
5662 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5663 break;
5665 /* Handle common unary and binary ops for efficiency. */
5666 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5667 case MOD: case UDIV: case UMOD: case AND: case IOR:
5668 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5669 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5670 case NE: case EQ: case GE: case GT: case LE:
5671 case LT: case GEU: case GTU: case LEU: case LTU:
5672 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5673 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5674 break;
5676 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5677 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5678 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5679 case SQRT: case FFS: case POPCOUNT:
5680 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5681 break;
5683 case VEC_SELECT:
5684 /* VEC_SELECT's second argument is a PARALLEL with integers that
5685 describe the elements selected. On ia64, those integers are
5686 always constants. Avoid walking the PARALLEL so that we don't
5687 get confused with "normal" parallels and then die. */
5688 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5689 break;
5691 case UNSPEC:
5692 switch (XINT (x, 1))
5694 case UNSPEC_LTOFF_DTPMOD:
5695 case UNSPEC_LTOFF_DTPREL:
5696 case UNSPEC_DTPREL:
5697 case UNSPEC_LTOFF_TPREL:
5698 case UNSPEC_TPREL:
5699 case UNSPEC_PRED_REL_MUTEX:
5700 case UNSPEC_PIC_CALL:
5701 case UNSPEC_MF:
5702 case UNSPEC_FETCHADD_ACQ:
5703 case UNSPEC_BSP_VALUE:
5704 case UNSPEC_FLUSHRS:
5705 case UNSPEC_BUNDLE_SELECTOR:
5706 break;
5708 case UNSPEC_GR_SPILL:
5709 case UNSPEC_GR_RESTORE:
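/* A gr_spill or gr_restore also touches the UNAT bit selected by the
   spill offset -- bit (offset >> 3) & 63 -- written for a spill and
   read for a restore.  */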
5711 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5712 HOST_WIDE_INT bit = (offset >> 3) & 63;
5714 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5715 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5716 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5717 new_flags, pred);
5718 break;
5721 case UNSPEC_FR_SPILL:
5722 case UNSPEC_FR_RESTORE:
5723 case UNSPEC_GETF_EXP:
5724 case UNSPEC_SETF_EXP:
5725 case UNSPEC_ADDP4:
5726 case UNSPEC_FR_SQRT_RECIP_APPROX:
5727 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5728 break;
5730 case UNSPEC_FR_RECIP_APPROX:
5731 case UNSPEC_SHRP:
5732 case UNSPEC_COPYSIGN:
5733 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5734 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5735 break;
5737 case UNSPEC_CMPXCHG_ACQ:
5738 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5739 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5740 break;
5742 default:
5743 gcc_unreachable ();
5745 break;
5747 case UNSPEC_VOLATILE:
5748 switch (XINT (x, 1))
5750 case UNSPECV_ALLOC:
5751 /* Alloc must always be the first instruction of a group.
5752 We force this by always returning true. */
5753 /* ??? We might get better scheduling if we explicitly check for
5754 input/local/output register dependencies, and modify the
5755 scheduler so that alloc is always reordered to the start of
5756 the current group. We could then eliminate all of the
5757 first_instruction code. */
5758 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5760 new_flags.is_write = 1;
5761 rws_access_regno (REG_AR_CFM, new_flags, pred);
5762 return 1;
5764 case UNSPECV_SET_BSP:
5765 need_barrier = 1;
5766 break;
5768 case UNSPECV_BLOCKAGE:
5769 case UNSPECV_INSN_GROUP_BARRIER:
5770 case UNSPECV_BREAK:
5771 case UNSPECV_PSAC_ALL:
5772 case UNSPECV_PSAC_NORMAL:
5773 return 0;
5775 default:
5776 gcc_unreachable ();
5778 break;
5780 case RETURN:
5781 new_flags.is_write = 0;
5782 need_barrier = rws_access_regno (REG_RP, flags, pred);
5783 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5785 new_flags.is_write = 1;
5786 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5787 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5788 break;
5790 default:
5791 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5792 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5793 switch (format_ptr[i])
5795 case '0': /* unused field */
5796 case 'i': /* integer */
5797 case 'n': /* note */
5798 case 'w': /* wide integer */
5799 case 's': /* pointer to string */
5800 case 'S': /* optional pointer to string */
5801 break;
5803 case 'e':
5804 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5805 need_barrier = 1;
5806 break;
5808 case 'E':
5809 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5810 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5811 need_barrier = 1;
5812 break;
5814 default:
5815 gcc_unreachable ();
5817 break;
5819 return need_barrier;
5822 /* Clear out the state for group_barrier_needed at the start of a
5823 sequence of insns. */
5825 static void
5826 init_insn_group_barriers (void)
5828 memset (rws_sum, 0, sizeof (rws_sum));
5829 first_instruction = 1;
5832 /* Given the current state, determine whether a group barrier (a stop bit) is
5833 necessary before INSN. Return nonzero if so. This modifies the state to
5834 include the effects of INSN as a side-effect. */
5836 static int
5837 group_barrier_needed (rtx insn)
5839 rtx pat;
5840 int need_barrier = 0;
5841 struct reg_flags flags;
5843 memset (&flags, 0, sizeof (flags));
5844 switch (GET_CODE (insn))
5846 case NOTE:
5847 break;
5849 case BARRIER:
5850 /* A barrier doesn't imply an instruction group boundary. */
5851 break;
5853 case CODE_LABEL:
5854 memset (rws_insn, 0, sizeof (rws_insn));
5855 return 1;
5857 case CALL_INSN:
5858 flags.is_branch = 1;
5859 flags.is_sibcall = SIBLING_CALL_P (insn);
5860 memset (rws_insn, 0, sizeof (rws_insn));
5862 /* Don't bundle a call following another call. */
5863 if ((pat = prev_active_insn (insn))
5864 && GET_CODE (pat) == CALL_INSN)
5866 need_barrier = 1;
5867 break;
5870 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5871 break;
5873 case JUMP_INSN:
5874 flags.is_branch = 1;
5876 /* Don't bundle a jump following a call. */
5877 if ((pat = prev_active_insn (insn))
5878 && GET_CODE (pat) == CALL_INSN)
5880 need_barrier = 1;
5881 break;
5883 /* FALLTHRU */
5885 case INSN:
5886 if (GET_CODE (PATTERN (insn)) == USE
5887 || GET_CODE (PATTERN (insn)) == CLOBBER)
5888 /* Don't care about USE and CLOBBER "insns"---those are used to
5889 indicate to the optimizer that it shouldn't get rid of
5890 certain operations. */
5891 break;
5893 pat = PATTERN (insn);
5895 /* Ug. Hack hacks hacked elsewhere. */
5896 switch (recog_memoized (insn))
5898 /* We play dependency tricks with the epilogue in order
5899 to get proper schedules. Undo this for dv analysis. */
5900 case CODE_FOR_epilogue_deallocate_stack:
5901 case CODE_FOR_prologue_allocate_stack:
5902 pat = XVECEXP (pat, 0, 0);
5903 break;
5905 /* The pattern we use for br.cloop confuses the code above.
5906 The second element of the vector is representative. */
5907 case CODE_FOR_doloop_end_internal:
5908 pat = XVECEXP (pat, 0, 1);
5909 break;
5911 /* Doesn't generate code. */
5912 case CODE_FOR_pred_rel_mutex:
5913 case CODE_FOR_prologue_use:
5914 return 0;
5916 default:
5917 break;
5920 memset (rws_insn, 0, sizeof (rws_insn));
5921 need_barrier = rtx_needs_barrier (pat, flags, 0);
5923 /* Check to see if the previous instruction was a volatile
5924 asm. */
5925 if (! need_barrier)
5926 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5927 break;
5929 default:
5930 gcc_unreachable ();
5933 if (first_instruction && INSN_P (insn)
5934 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5935 && GET_CODE (PATTERN (insn)) != USE
5936 && GET_CODE (PATTERN (insn)) != CLOBBER)
5938 need_barrier = 0;
5939 first_instruction = 0;
5942 return need_barrier;
5945 /* Like group_barrier_needed, but do not clobber the current state. */
5947 static int
5948 safe_group_barrier_needed (rtx insn)
5950 struct reg_write_state rws_saved[NUM_REGS];
5951 int saved_first_instruction;
5952 int t;
5954 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5955 saved_first_instruction = first_instruction;
5957 t = group_barrier_needed (insn);
5959 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5960 first_instruction = saved_first_instruction;
5962 return t;
5965 /* Scan the current function and insert stop bits as necessary to
5966 eliminate dependencies. This function assumes that a final
5967 instruction scheduling pass has been run which has already
5968 inserted most of the necessary stop bits. This function only
5969 inserts new ones at basic block boundaries, since these are
5970 invisible to the scheduler. */
5972 static void
5973 emit_insn_group_barriers (FILE *dump)
5975 rtx insn;
5976 rtx last_label = 0;
5977 int insns_since_last_label = 0;
5979 init_insn_group_barriers ();
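/* Remember the last label or basic-block note seen; when an insn later
   turns out to need a stop bit, the stop is emitted before that point,
   since block boundaries are invisible to the scheduler.  */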
5981 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5983 if (GET_CODE (insn) == CODE_LABEL)
5985 if (insns_since_last_label)
5986 last_label = insn;
5987 insns_since_last_label = 0;
5989 else if (GET_CODE (insn) == NOTE
5990 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5992 if (insns_since_last_label)
5993 last_label = insn;
5994 insns_since_last_label = 0;
5996 else if (GET_CODE (insn) == INSN
5997 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5998 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6000 init_insn_group_barriers ();
6001 last_label = 0;
6003 else if (INSN_P (insn))
6005 insns_since_last_label = 1;
6007 if (group_barrier_needed (insn))
6009 if (last_label)
6011 if (dump)
6012 fprintf (dump, "Emitting stop before label %d\n",
6013 INSN_UID (last_label));
6014 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6015 insn = last_label;
6017 init_insn_group_barriers ();
6018 last_label = 0;
6025 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6026 This function has to emit all necessary group barriers. */
6028 static void
6029 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6031 rtx insn;
6033 init_insn_group_barriers ();
6035 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
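/* A BARRIER marks a point control never falls through; make sure the
   last active insn before it (skipping a jump table) already ends an
   instruction group, then restart the tracking state.  */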
6037 if (GET_CODE (insn) == BARRIER)
6039 rtx last = prev_active_insn (insn);
6041 if (! last)
6042 continue;
6043 if (GET_CODE (last) == JUMP_INSN
6044 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6045 last = prev_active_insn (last);
6046 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6047 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6049 init_insn_group_barriers ();
6051 else if (INSN_P (insn))
6053 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6054 init_insn_group_barriers ();
6055 else if (group_barrier_needed (insn))
6057 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6058 init_insn_group_barriers ();
6059 group_barrier_needed (insn);
6067 /* Instruction scheduling support. */
6069 #define NR_BUNDLES 10
6071 /* A list of names of all available bundles. */
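/* The index of a name in this array is the bundle template code used
   throughout the bundling code (see get_template and gen_bundle_selector
   below).  */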
6073 static const char *bundle_name [NR_BUNDLES] =
6075 ".mii",
6076 ".mmi",
6077 ".mfi",
6078 ".mmf",
6079 #if NR_BUNDLES == 10
6080 ".bbb",
6081 ".mbb",
6082 #endif
6083 ".mib",
6084 ".mmb",
6085 ".mfb",
6086 ".mlx"
6089 /* Nonzero if we should insert stop bits into the schedule. */
6091 int ia64_final_schedule = 0;
6093 /* Codes of the corresponding queried units: */
6095 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6096 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6098 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6099 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6101 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6103 /* The following variable value is an insn group barrier. */
6105 static rtx dfa_stop_insn;
6107 /* The following variable value is the last issued insn. */
6109 static rtx last_scheduled_insn;
6111 /* The following variable value is size of the DFA state. */
6113 static size_t dfa_state_size;
6115 /* The following variable value is pointer to a DFA state used as
6116 temporary variable. */
6118 static state_t temp_dfa_state = NULL;
6120 /* The following variable value is DFA state after issuing the last
6121 insn. */
6123 static state_t prev_cycle_state = NULL;
6125 /* The following array element values are TRUE if the corresponding
6126 insn requires stop bits to be added before it. */
6128 static char *stops_p;
6130 /* The following variable is used to set up the above-mentioned array. */
6132 static int stop_before_p = 0;
6134 /* The following variable value is the length of the arrays `clocks' and
6135 `add_cycles'. */
6137 static int clocks_length;
6139 /* The following array element values are cycles on which the
6140 corresponding insn will be issued. The array is used only for
6141 Itanium1. */
6143 static int *clocks;
6145 /* The following array element values are the numbers of cycles that should
6146 be added to improve insn scheduling for MM_insns on Itanium1. */
6148 static int *add_cycles;
6150 static rtx ia64_single_set (rtx);
6151 static void ia64_emit_insn_before (rtx, rtx);
6153 /* Map a bundle number to its pseudo-op. */
6155 const char *
6156 get_bundle_name (int b)
6158 return bundle_name[b];
6162 /* Return the maximum number of instructions a cpu can issue. */
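/* The issue rate is six: each Itanium implementation handled here can
   dispatch up to two bundles, i.e. six instructions, per clock.  */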
6164 static int
6165 ia64_issue_rate (void)
6167 return 6;
6170 /* Helper function - like single_set, but look inside COND_EXEC. */
6172 static rtx
6173 ia64_single_set (rtx insn)
6175 rtx x = PATTERN (insn), ret;
6176 if (GET_CODE (x) == COND_EXEC)
6177 x = COND_EXEC_CODE (x);
6178 if (GET_CODE (x) == SET)
6179 return x;
6181 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6182 Although they are not classical single set, the second set is there just
6183 to protect it from moving past FP-relative stack accesses. */
6184 switch (recog_memoized (insn))
6186 case CODE_FOR_prologue_allocate_stack:
6187 case CODE_FOR_epilogue_deallocate_stack:
6188 ret = XVECEXP (x, 0, 0);
6189 break;
6191 default:
6192 ret = single_set_2 (insn, x);
6193 break;
6196 return ret;
6199 /* Adjust the cost of a scheduling dependency. Return the new cost of
6200 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6202 static int
6203 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6205 enum attr_itanium_class dep_class;
6206 enum attr_itanium_class insn_class;
6208 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6209 return cost;
6211 insn_class = ia64_safe_itanium_class (insn);
6212 dep_class = ia64_safe_itanium_class (dep_insn);
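/* An output dependency in which either insn is a store (ST or STF) is
   given zero cost.  */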
6213 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6214 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6215 return 0;
6217 return cost;
6220 /* Like emit_insn_before, but skip cycle_display notes.
6221 ??? When cycle display notes are implemented, update this. */
6223 static void
6224 ia64_emit_insn_before (rtx insn, rtx before)
6226 emit_insn_before (insn, before);
6229 /* The following function marks insns that produce addresses for load
6230 and store insns. Such insns will be placed into M slots because this
6231 decreases latency for Itanium1 (see function
6232 `ia64_produce_address_p' and the DFA descriptions). */
6234 static void
6235 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6237 rtx insn, link, next, next_tail;
6239 /* Before reload, which_alternative is not set, which means that
6240 ia64_safe_itanium_class will produce wrong results for (at least)
6241 move instructions. */
6242 if (!reload_completed)
6243 return;
6245 next_tail = NEXT_INSN (tail);
6246 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6247 if (INSN_P (insn))
6248 insn->call = 0;
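/* The CALL field of each insn is reused below as a flag: it is set when
   an IALU insn feeds the address of a dependent load or store, and is
   read back by ia64_produce_address_p.  */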
6249 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6250 if (INSN_P (insn)
6251 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6253 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
6255 enum attr_itanium_class c;
6257 if (REG_NOTE_KIND (link) != REG_DEP_TRUE)
6258 continue;
6259 next = XEXP (link, 0);
6260 c = ia64_safe_itanium_class (next);
6261 if ((c == ITANIUM_CLASS_ST
6262 || c == ITANIUM_CLASS_STF)
6263 && ia64_st_address_bypass_p (insn, next))
6264 break;
6265 else if ((c == ITANIUM_CLASS_LD
6266 || c == ITANIUM_CLASS_FLD
6267 || c == ITANIUM_CLASS_FLDP)
6268 && ia64_ld_address_bypass_p (insn, next))
6269 break;
6271 insn->call = link != 0;
6275 /* We're beginning a new block. Initialize data structures as necessary. */
6277 static void
6278 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6279 int sched_verbose ATTRIBUTE_UNUSED,
6280 int max_ready ATTRIBUTE_UNUSED)
6282 #ifdef ENABLE_CHECKING
6283 rtx insn;
6285 if (reload_completed)
6286 for (insn = NEXT_INSN (current_sched_info->prev_head);
6287 insn != current_sched_info->next_tail;
6288 insn = NEXT_INSN (insn))
6289 gcc_assert (!SCHED_GROUP_P (insn));
6290 #endif
6291 last_scheduled_insn = NULL_RTX;
6292 init_insn_group_barriers ();
6295 /* We are about to begin issuing insns for this clock cycle.
6296 Override the default sort algorithm to better slot instructions. */
6298 static int
6299 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6300 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6301 int reorder_type)
6303 int n_asms;
6304 int n_ready = *pn_ready;
6305 rtx *e_ready = ready + n_ready;
6306 rtx *insnp;
6308 if (sched_verbose)
6309 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6311 if (reorder_type == 0)
6313 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6314 n_asms = 0;
6315 for (insnp = ready; insnp < e_ready; insnp++)
6316 if (insnp < e_ready)
6318 rtx insn = *insnp;
6319 enum attr_type t = ia64_safe_type (insn);
6320 if (t == TYPE_UNKNOWN)
6322 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6323 || asm_noperands (PATTERN (insn)) >= 0)
6325 rtx lowest = ready[n_asms];
6326 ready[n_asms] = insn;
6327 *insnp = lowest;
6328 n_asms++;
6330 else
6332 rtx highest = ready[n_ready - 1];
6333 ready[n_ready - 1] = insn;
6334 *insnp = highest;
6335 return 1;
6340 if (n_asms < n_ready)
6342 /* Some normal insns to process. Skip the asms. */
6343 ready += n_asms;
6344 n_ready -= n_asms;
6346 else if (n_ready > 0)
6347 return 1;
6350 if (ia64_final_schedule)
6352 int deleted = 0;
6353 int nr_need_stop = 0;
6355 for (insnp = ready; insnp < e_ready; insnp++)
6356 if (safe_group_barrier_needed (*insnp))
6357 nr_need_stop++;
6359 if (reorder_type == 1 && n_ready == nr_need_stop)
6360 return 0;
6361 if (reorder_type == 0)
6362 return 1;
6363 insnp = e_ready;
6364 /* Move down everything that needs a stop bit, preserving
6365 relative order. */
6366 while (insnp-- > ready + deleted)
6367 while (insnp >= ready + deleted)
6369 rtx insn = *insnp;
6370 if (! safe_group_barrier_needed (insn))
6371 break;
6372 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6373 *ready = insn;
6374 deleted++;
6376 n_ready -= deleted;
6377 ready += deleted;
6380 return 1;
6383 /* We are about to begin issuing insns for this clock cycle. Override
6384 the default sort algorithm to better slot instructions. */
6386 static int
6387 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6388 int clock_var)
6390 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6391 pn_ready, clock_var, 0);
6394 /* Like ia64_sched_reorder, but called after issuing each insn.
6395 Override the default sort algorithm to better slot instructions. */
6397 static int
6398 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6399 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6400 int *pn_ready, int clock_var)
6402 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6403 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6404 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6405 clock_var, 1);
6408 /* We are about to issue INSN. Return the number of insns left on the
6409 ready queue that can be issued this cycle. */
6411 static int
6412 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6413 int sched_verbose ATTRIBUTE_UNUSED,
6414 rtx insn ATTRIBUTE_UNUSED,
6415 int can_issue_more ATTRIBUTE_UNUSED)
6417 last_scheduled_insn = insn;
6418 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6419 if (reload_completed)
6421 int needed = group_barrier_needed (insn);
6423 gcc_assert (!needed);
6424 if (GET_CODE (insn) == CALL_INSN)
6425 init_insn_group_barriers ();
6426 stops_p [INSN_UID (insn)] = stop_before_p;
6427 stop_before_p = 0;
6429 return 1;
6432 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6433 can be chosen. */
6435 static int
6436 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6438 gcc_assert (insn && INSN_P (insn));
6439 return (!reload_completed
6440 || !safe_group_barrier_needed (insn));
6443 /* The following variable value is a pseudo-insn used by the DFA insn
6444 scheduler to change the DFA state when the simulated clock is
6445 increased. */
6447 static rtx dfa_pre_cycle_insn;
6449 /* We are about to begin issuing INSN. Return nonzero if we cannot
6450 issue it on the given cycle CLOCK, and return zero if we should not sort
6451 the ready queue on the next clock start. */
6453 static int
6454 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6455 int clock, int *sort_p)
6457 int setup_clocks_p = FALSE;
6459 gcc_assert (insn && INSN_P (insn));
6460 if ((reload_completed && safe_group_barrier_needed (insn))
6461 || (last_scheduled_insn
6462 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6463 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6464 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6466 init_insn_group_barriers ();
6467 if (verbose && dump)
6468 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6469 last_clock == clock ? " + cycle advance" : "");
6470 stop_before_p = 1;
6471 if (last_clock == clock)
6473 state_transition (curr_state, dfa_stop_insn);
6474 if (TARGET_EARLY_STOP_BITS)
6475 *sort_p = (last_scheduled_insn == NULL_RTX
6476 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6477 else
6478 *sort_p = 0;
6479 return 1;
6481 else if (reload_completed)
6482 setup_clocks_p = TRUE;
6483 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6484 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6485 state_reset (curr_state);
6486 else
6488 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6489 state_transition (curr_state, dfa_stop_insn);
6490 state_transition (curr_state, dfa_pre_cycle_insn);
6491 state_transition (curr_state, NULL);
6494 else if (reload_completed)
6495 setup_clocks_p = TRUE;
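/* On Itanium1, record in add_cycles how many extra cycles INSN needs
   when it depends on an MMMUL/MMSHF insn issued less than 4 cycles
   earlier; the bundling phase later pads the schedule accordingly.  */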
6496 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6497 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6498 && asm_noperands (PATTERN (insn)) < 0)
6500 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6502 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6504 rtx link;
6505 int d = -1;
6507 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6508 if (REG_NOTE_KIND (link) == 0)
6510 enum attr_itanium_class dep_class;
6511 rtx dep_insn = XEXP (link, 0);
6513 dep_class = ia64_safe_itanium_class (dep_insn);
6514 if ((dep_class == ITANIUM_CLASS_MMMUL
6515 || dep_class == ITANIUM_CLASS_MMSHF)
6516 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6517 && (d < 0
6518 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6519 d = last_clock - clocks [INSN_UID (dep_insn)];
6521 if (d >= 0)
6522 add_cycles [INSN_UID (insn)] = 3 - d;
6525 return 0;
6530 /* The following page contains abstract data `bundle states' which are
6531 used for bundling insns (inserting nops and template generation). */
6533 /* The following describes state of insn bundling. */
6535 struct bundle_state
6537 /* Unique bundle state number to identify them in the debugging
6538 output */
6539 int unique_num;
6540 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6541 /* number of nops before and after the insn */
6542 short before_nops_num, after_nops_num;
6543 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
6544 insn) */
6545 int cost; /* cost of the state in cycles */
6546 int accumulated_insns_num; /* number of all previous insns including
6547 nops. L is considered as 2 insns */
6548 int branch_deviation; /* deviation of previous branches from 3rd slots */
6549 struct bundle_state *next; /* next state with the same insn_num */
6550 struct bundle_state *originator; /* originator (previous insn state) */
6551 /* All bundle states are in the following chain. */
6552 struct bundle_state *allocated_states_chain;
6553 /* The DFA State after issuing the insn and the nops. */
6554 state_t dfa_state;
6557 /* The following maps an insn number to the corresponding bundle state. */
6559 static struct bundle_state **index_to_bundle_states;
6561 /* The unique number of next bundle state. */
6563 static int bundle_states_num;
6565 /* All allocated bundle states are in the following chain. */
6567 static struct bundle_state *allocated_bundle_states_chain;
6569 /* All allocated but not used bundle states are in the following
6570 chain. */
6572 static struct bundle_state *free_bundle_state_chain;
6575 /* The following function returns a free bundle state. */
6577 static struct bundle_state *
6578 get_free_bundle_state (void)
6580 struct bundle_state *result;
6582 if (free_bundle_state_chain != NULL)
6584 result = free_bundle_state_chain;
6585 free_bundle_state_chain = result->next;
6587 else
6589 result = xmalloc (sizeof (struct bundle_state));
6590 result->dfa_state = xmalloc (dfa_state_size);
6591 result->allocated_states_chain = allocated_bundle_states_chain;
6592 allocated_bundle_states_chain = result;
6594 result->unique_num = bundle_states_num++;
6595 return result;
6599 /* The following function frees given bundle state. */
6601 static void
6602 free_bundle_state (struct bundle_state *state)
6604 state->next = free_bundle_state_chain;
6605 free_bundle_state_chain = state;
6608 /* Start work with abstract data `bundle states'. */
6610 static void
6611 initiate_bundle_states (void)
6613 bundle_states_num = 0;
6614 free_bundle_state_chain = NULL;
6615 allocated_bundle_states_chain = NULL;
6618 /* Finish work with abstract data `bundle states'. */
6620 static void
6621 finish_bundle_states (void)
6623 struct bundle_state *curr_state, *next_state;
6625 for (curr_state = allocated_bundle_states_chain;
6626 curr_state != NULL;
6627 curr_state = next_state)
6629 next_state = curr_state->allocated_states_chain;
6630 free (curr_state->dfa_state);
6631 free (curr_state);
6635 /* Hash table of the bundle states. The key is dfa_state and insn_num
6636 of the bundle states. */
6638 static htab_t bundle_state_table;
6640 /* The function returns hash of BUNDLE_STATE. */
6642 static unsigned
6643 bundle_state_hash (const void *bundle_state)
6645 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6646 unsigned result, i;
6648 for (result = i = 0; i < dfa_state_size; i++)
6649 result += (((unsigned char *) state->dfa_state) [i]
6650 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6651 return result + state->insn_num;
6654 /* The function returns nonzero if the bundle state keys are equal. */
6656 static int
6657 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6659 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6660 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6662 return (state1->insn_num == state2->insn_num
6663 && memcmp (state1->dfa_state, state2->dfa_state,
6664 dfa_state_size) == 0);
6667 /* The function inserts the BUNDLE_STATE into the hash table. The
6668 function returns nonzero if the bundle has been inserted into the
6669 table. The table contains the best bundle state with given key. */
6671 static int
6672 insert_bundle_state (struct bundle_state *bundle_state)
6674 void **entry_ptr;
6676 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6677 if (*entry_ptr == NULL)
6679 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6680 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6681 *entry_ptr = (void *) bundle_state;
6682 return TRUE;
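/* An equivalent state (same DFA state and insn number) is already in the
   table; keep whichever of the two is better: lower cost, then fewer
   accumulated insns (nops), then smaller branch deviation.  */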
6684 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6685 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6686 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6687 > bundle_state->accumulated_insns_num
6688 || (((struct bundle_state *)
6689 *entry_ptr)->accumulated_insns_num
6690 == bundle_state->accumulated_insns_num
6691 && ((struct bundle_state *)
6692 *entry_ptr)->branch_deviation
6693 > bundle_state->branch_deviation))))
6696 struct bundle_state temp;
6698 temp = *(struct bundle_state *) *entry_ptr;
6699 *(struct bundle_state *) *entry_ptr = *bundle_state;
6700 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6701 *bundle_state = temp;
6703 return FALSE;
6706 /* Start work with the hash table. */
6708 static void
6709 initiate_bundle_state_table (void)
6711 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6712 (htab_del) 0);
6715 /* Finish work with the hash table. */
6717 static void
6718 finish_bundle_state_table (void)
6720 htab_delete (bundle_state_table);
6725 /* The following variable is an insn `nop' used to check bundle states
6726 with different numbers of inserted nops. */
6728 static rtx ia64_nop;
6730 /* The following function tries to issue NOPS_NUM nops for the current
6731 state without advancing the processor cycle. If it fails, the
6732 function returns FALSE and frees the current state. */
6734 static int
6735 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6737 int i;
6739 for (i = 0; i < nops_num; i++)
6740 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6742 free_bundle_state (curr_state);
6743 return FALSE;
6745 return TRUE;
6748 /* The following function tries to issue INSN for the current
6749 state without advancing the processor cycle. If it fails, the
6750 function returns FALSE and frees the current state. */
6752 static int
6753 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6755 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6757 free_bundle_state (curr_state);
6758 return FALSE;
6760 return TRUE;
6763 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6764 starting from ORIGINATOR without advancing the processor cycle. If
6765 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6766 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6767 If successful, the function creates a new bundle state and
6768 inserts it into the hash table and into `index_to_bundle_states'. */
6770 static void
6771 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6772 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6774 struct bundle_state *curr_state;
6776 curr_state = get_free_bundle_state ();
6777 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6778 curr_state->insn = insn;
6779 curr_state->insn_num = originator->insn_num + 1;
6780 curr_state->cost = originator->cost;
6781 curr_state->originator = originator;
6782 curr_state->before_nops_num = before_nops_num;
6783 curr_state->after_nops_num = 0;
6784 curr_state->accumulated_insns_num
6785 = originator->accumulated_insns_num + before_nops_num;
6786 curr_state->branch_deviation = originator->branch_deviation;
6787 gcc_assert (insn);
6788 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6790 gcc_assert (GET_MODE (insn) != TImode);
6791 if (!try_issue_nops (curr_state, before_nops_num))
6792 return;
6793 if (!try_issue_insn (curr_state, insn))
6794 return;
6795 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6796 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6797 && curr_state->accumulated_insns_num % 3 != 0)
6799 free_bundle_state (curr_state);
6800 return;
6803 else if (GET_MODE (insn) != TImode)
6805 if (!try_issue_nops (curr_state, before_nops_num))
6806 return;
6807 if (!try_issue_insn (curr_state, insn))
6808 return;
6809 curr_state->accumulated_insns_num++;
6810 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
6811 && asm_noperands (PATTERN (insn)) < 0);
6813 if (ia64_safe_type (insn) == TYPE_L)
6814 curr_state->accumulated_insns_num++;
6816 else
6818 /* If this is an insn that must be first in a group, then don't allow
6819 nops to be emitted before it. Currently, alloc is the only such
6820 supported instruction. */
6821 /* ??? The bundling automatons should handle this for us, but they do
6822 not yet have support for the first_insn attribute. */
6823 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
6825 free_bundle_state (curr_state);
6826 return;
6829 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6830 state_transition (curr_state->dfa_state, NULL);
6831 curr_state->cost++;
6832 if (!try_issue_nops (curr_state, before_nops_num))
6833 return;
6834 if (!try_issue_insn (curr_state, insn))
6835 return;
6836 curr_state->accumulated_insns_num++;
6837 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6838 || asm_noperands (PATTERN (insn)) >= 0)
6840 /* Finish bundle containing asm insn. */
6841 curr_state->after_nops_num
6842 = 3 - curr_state->accumulated_insns_num % 3;
6843 curr_state->accumulated_insns_num
6844 += 3 - curr_state->accumulated_insns_num % 3;
6846 else if (ia64_safe_type (insn) == TYPE_L)
6847 curr_state->accumulated_insns_num++;
6849 if (ia64_safe_type (insn) == TYPE_B)
6850 curr_state->branch_deviation
6851 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6852 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6854 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6856 state_t dfa_state;
6857 struct bundle_state *curr_state1;
6858 struct bundle_state *allocated_states_chain;
6860 curr_state1 = get_free_bundle_state ();
6861 dfa_state = curr_state1->dfa_state;
6862 allocated_states_chain = curr_state1->allocated_states_chain;
6863 *curr_state1 = *curr_state;
6864 curr_state1->dfa_state = dfa_state;
6865 curr_state1->allocated_states_chain = allocated_states_chain;
6866 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6867 dfa_state_size);
6868 curr_state = curr_state1;
6870 if (!try_issue_nops (curr_state,
6871 3 - curr_state->accumulated_insns_num % 3))
6872 return;
6873 curr_state->after_nops_num
6874 = 3 - curr_state->accumulated_insns_num % 3;
6875 curr_state->accumulated_insns_num
6876 += 3 - curr_state->accumulated_insns_num % 3;
6878 if (!insert_bundle_state (curr_state))
6879 free_bundle_state (curr_state);
6880 return;
6883 /* The following function returns the position in the two-bundle
6884 window for the given STATE. */
6886 static int
6887 get_max_pos (state_t state)
6889 if (cpu_unit_reservation_p (state, pos_6))
6890 return 6;
6891 else if (cpu_unit_reservation_p (state, pos_5))
6892 return 5;
6893 else if (cpu_unit_reservation_p (state, pos_4))
6894 return 4;
6895 else if (cpu_unit_reservation_p (state, pos_3))
6896 return 3;
6897 else if (cpu_unit_reservation_p (state, pos_2))
6898 return 2;
6899 else if (cpu_unit_reservation_p (state, pos_1))
6900 return 1;
6901 else
6902 return 0;
6905 /* The function returns the code of a possible template for the given
6906 position and state. The function should be called only with the two
6907 position values 3 or 6. We avoid generating F NOPs by putting
6908 templates containing F insns at the end of the template search,
6909 because of an undocumented anomaly in McKinley-derived cores which can
6910 cause stalls if an F-unit insn (including a NOP) is issued within a
6911 six-cycle window after reading certain application registers (such
6912 as ar.bsp). Furthermore, power considerations also argue against
6913 the use of F-unit instructions unless they're really needed. */
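/* The returned code indexes bundle_name[]: 0 -> .mii, 1 -> .mmi,
   2 -> .mfi, 3 -> .mmf, 4 -> .bbb, 5 -> .mbb, 6 -> .mib, 7 -> .mmb,
   8 -> .mfb, 9 -> .mlx.  */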
6915 static int
6916 get_template (state_t state, int pos)
6918 switch (pos)
6920 case 3:
6921 if (cpu_unit_reservation_p (state, _0mmi_))
6922 return 1;
6923 else if (cpu_unit_reservation_p (state, _0mii_))
6924 return 0;
6925 else if (cpu_unit_reservation_p (state, _0mmb_))
6926 return 7;
6927 else if (cpu_unit_reservation_p (state, _0mib_))
6928 return 6;
6929 else if (cpu_unit_reservation_p (state, _0mbb_))
6930 return 5;
6931 else if (cpu_unit_reservation_p (state, _0bbb_))
6932 return 4;
6933 else if (cpu_unit_reservation_p (state, _0mmf_))
6934 return 3;
6935 else if (cpu_unit_reservation_p (state, _0mfi_))
6936 return 2;
6937 else if (cpu_unit_reservation_p (state, _0mfb_))
6938 return 8;
6939 else if (cpu_unit_reservation_p (state, _0mlx_))
6940 return 9;
6941 else
6942 gcc_unreachable ();
6943 case 6:
6944 if (cpu_unit_reservation_p (state, _1mmi_))
6945 return 1;
6946 else if (cpu_unit_reservation_p (state, _1mii_))
6947 return 0;
6948 else if (cpu_unit_reservation_p (state, _1mmb_))
6949 return 7;
6950 else if (cpu_unit_reservation_p (state, _1mib_))
6951 return 6;
6952 else if (cpu_unit_reservation_p (state, _1mbb_))
6953 return 5;
6954 else if (cpu_unit_reservation_p (state, _1bbb_))
6955 return 4;
6956 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6957 return 3;
6958 else if (cpu_unit_reservation_p (state, _1mfi_))
6959 return 2;
6960 else if (cpu_unit_reservation_p (state, _1mfb_))
6961 return 8;
6962 else if (cpu_unit_reservation_p (state, _1mlx_))
6963 return 9;
6964 else
6965 gcc_unreachable ();
6966 default:
6967 gcc_unreachable ();
6971 /* The following function returns the first insn important for insn
6972 bundling, starting at INSN and before TAIL. */
6974 static rtx
6975 get_next_important_insn (rtx insn, rtx tail)
6977 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6978 if (INSN_P (insn)
6979 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6980 && GET_CODE (PATTERN (insn)) != USE
6981 && GET_CODE (PATTERN (insn)) != CLOBBER)
6982 return insn;
6983 return NULL_RTX;
6986 /* The following function does insn bundling. Bundling means
6987 inserting templates and nop insns to fit insn groups into permitted
6988 templates. Instruction scheduling uses NDFA (non-deterministic
6989 finite automata) encoding information about the templates and the
6990 inserted nops. Nondeterminism of the automata permits following
6991 all possible insn sequences very quickly.
6993 Unfortunately it is not possible to get information about inserting
6994 nop insns and used templates from the automata states. The
6995 automata only says that we can issue an insn possibly inserting
6996 some nops before it and using some template. Therefore insn
6997 bundling in this function is implemented by using DFA
6998 (deterministic finite automata). We follow all possible insn
6999 sequences by inserting 0-2 nops (that is what the NDFA describes for
7000 insn scheduling) before/after each insn being bundled. We know the
7001 start of a simulated processor cycle from insn scheduling (an insn
7002 starting a new cycle has TImode).
7004 A simple implementation of insn bundling would create an enormous
7005 number of possible insn sequences satisfying the information about new
7006 cycle ticks taken from the insn scheduling. To make the algorithm
7007 practical we use dynamic programming. Each decision (about
7008 inserting nops and implicitly about previous decisions) is described
7009 by the structure bundle_state (see above). If we generate the same
7010 bundle state (the key is the automaton state after issuing the insns and
7011 nops for it), we reuse the already generated one. As a consequence we
7012 reject some decisions which cannot improve the solution and
7013 reduce the memory needed by the algorithm.
7015 When we reach the end of EBB (extended basic block), we choose the
7016 best sequence and then, moving back in EBB, insert templates for
7017 the best alternative. The templates are taken from querying
7018 automaton state for each insn in chosen bundle states.
7020 So the algorithm makes two (forward and backward) passes through
7021 the EBB. There is an additional forward pass through the EBB for the
7022 Itanium1 processor. This pass inserts more nops to make the dependency
7023 between a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
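/* For example, for each important insn the forward pass tries issuing it
   after 0, 1 or 2 extra nops from every bundle state reached at the
   previous insn (see issue_nops_and_insn below); states with the same DFA
   state and insn number are then merged by insert_bundle_state, keeping
   the cheaper one, which keeps the search space small.  */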
7025 static void
7026 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7028 struct bundle_state *curr_state, *next_state, *best_state;
7029 rtx insn, next_insn;
7030 int insn_num;
7031 int i, bundle_end_p, only_bundle_end_p, asm_p;
7032 int pos = 0, max_pos, template0, template1;
7033 rtx b;
7034 rtx nop;
7035 enum attr_type type;
7037 insn_num = 0;
7038 /* Count insns in the EBB. */
7039 for (insn = NEXT_INSN (prev_head_insn);
7040 insn && insn != tail;
7041 insn = NEXT_INSN (insn))
7042 if (INSN_P (insn))
7043 insn_num++;
7044 if (insn_num == 0)
7045 return;
7046 bundling_p = 1;
7047 dfa_clean_insn_cache ();
7048 initiate_bundle_state_table ();
7049 index_to_bundle_states = xmalloc ((insn_num + 2)
7050 * sizeof (struct bundle_state *));
7051 /* First (forward) pass -- generation of bundle states. */
7052 curr_state = get_free_bundle_state ();
7053 curr_state->insn = NULL;
7054 curr_state->before_nops_num = 0;
7055 curr_state->after_nops_num = 0;
7056 curr_state->insn_num = 0;
7057 curr_state->cost = 0;
7058 curr_state->accumulated_insns_num = 0;
7059 curr_state->branch_deviation = 0;
7060 curr_state->next = NULL;
7061 curr_state->originator = NULL;
7062 state_reset (curr_state->dfa_state);
7063 index_to_bundle_states [0] = curr_state;
7064 insn_num = 0;
7065 /* Shift cycle mark if it is put on insn which could be ignored. */
7066 for (insn = NEXT_INSN (prev_head_insn);
7067 insn != tail;
7068 insn = NEXT_INSN (insn))
7069 if (INSN_P (insn)
7070 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7071 || GET_CODE (PATTERN (insn)) == USE
7072 || GET_CODE (PATTERN (insn)) == CLOBBER)
7073 && GET_MODE (insn) == TImode)
7075 PUT_MODE (insn, VOIDmode);
7076 for (next_insn = NEXT_INSN (insn);
7077 next_insn != tail;
7078 next_insn = NEXT_INSN (next_insn))
7079 if (INSN_P (next_insn)
7080 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7081 && GET_CODE (PATTERN (next_insn)) != USE
7082 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7084 PUT_MODE (next_insn, TImode);
7085 break;
7088 /* Forward pass: generation of bundle states. */
7089 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7090 insn != NULL_RTX;
7091 insn = next_insn)
7093 gcc_assert (INSN_P (insn)
7094 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7095 && GET_CODE (PATTERN (insn)) != USE
7096 && GET_CODE (PATTERN (insn)) != CLOBBER);
7097 type = ia64_safe_type (insn);
7098 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7099 insn_num++;
7100 index_to_bundle_states [insn_num] = NULL;
7101 for (curr_state = index_to_bundle_states [insn_num - 1];
7102 curr_state != NULL;
7103 curr_state = next_state)
7105 pos = curr_state->accumulated_insns_num % 3;
7106 next_state = curr_state->next;
7107 /* We must fill up the current bundle in order to start a
7108 subsequent asm insn in a new bundle. Asm insn is always
7109 placed in a separate bundle. */
7110 only_bundle_end_p
7111 = (next_insn != NULL_RTX
7112 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7113 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7114 /* We may fill up the current bundle if it is the cycle end
7115 without a group barrier. */
7116 bundle_end_p
7117 = (only_bundle_end_p || next_insn == NULL_RTX
7118 || (GET_MODE (next_insn) == TImode
7119 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7120 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7121 || type == TYPE_S
7122 /* We need to insert 2 nops for cases like M_MII. To
7123 guarantee issuing all insns on the same cycle for
7124 Itanium 1, we need to issue 2 nops after the first M
7125 insn (MnnMII where n is a nop insn). */
7126 || ((type == TYPE_M || type == TYPE_A)
7127 && ia64_tune == PROCESSOR_ITANIUM
7128 && !bundle_end_p && pos == 1))
7129 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
7130 only_bundle_end_p);
7131 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
7132 only_bundle_end_p);
7133 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
7134 only_bundle_end_p);
7136 gcc_assert (index_to_bundle_states [insn_num]);
7137 for (curr_state = index_to_bundle_states [insn_num];
7138 curr_state != NULL;
7139 curr_state = curr_state->next)
7140 if (verbose >= 2 && dump)
7142 /* This structure is taken from generated code of the
7143 pipeline hazard recognizer (see file insn-attrtab.c).
7144 Please don't forget to change the structure if a new
7145 automaton is added to .md file. */
7146 struct DFA_chip
7148 unsigned short one_automaton_state;
7149 unsigned short oneb_automaton_state;
7150 unsigned short two_automaton_state;
7151 unsigned short twob_automaton_state;
7154 fprintf
7155 (dump,
7156 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7157 curr_state->unique_num,
7158 (curr_state->originator == NULL
7159 ? -1 : curr_state->originator->unique_num),
7160 curr_state->cost,
7161 curr_state->before_nops_num, curr_state->after_nops_num,
7162 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7163 (ia64_tune == PROCESSOR_ITANIUM
7164 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7165 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7166 INSN_UID (insn));
7170 /* We should find a solution because the 2nd insn scheduling has
7171 found one. */
7172 gcc_assert (index_to_bundle_states [insn_num]);
7173 /* Find a state corresponding to the best insn sequence. */
7174 best_state = NULL;
7175 for (curr_state = index_to_bundle_states [insn_num];
7176 curr_state != NULL;
7177 curr_state = curr_state->next)
7178 /* We are only looking at the states with a fully filled-up last
7179 bundle. First we prefer insn sequences with minimal cost,
7180 then with a minimal number of inserted nops, and finally with branch
7181 insns placed in the 3rd slots. */
7182 if (curr_state->accumulated_insns_num % 3 == 0
7183 && (best_state == NULL || best_state->cost > curr_state->cost
7184 || (best_state->cost == curr_state->cost
7185 && (curr_state->accumulated_insns_num
7186 < best_state->accumulated_insns_num
7187 || (curr_state->accumulated_insns_num
7188 == best_state->accumulated_insns_num
7189 && curr_state->branch_deviation
7190 < best_state->branch_deviation)))))
7191 best_state = curr_state;
7192 /* Second (backward) pass: adding nops and templates. */
7193 insn_num = best_state->before_nops_num;
7194 template0 = template1 = -1;
7195 for (curr_state = best_state;
7196 curr_state->originator != NULL;
7197 curr_state = curr_state->originator)
7199 insn = curr_state->insn;
7200 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7201 || asm_noperands (PATTERN (insn)) >= 0);
7202 insn_num++;
7203 if (verbose >= 2 && dump)
7205 struct DFA_chip
7207 unsigned short one_automaton_state;
7208 unsigned short oneb_automaton_state;
7209 unsigned short two_automaton_state;
7210 unsigned short twob_automaton_state;
7213 fprintf
7214 (dump,
7215 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7216 curr_state->unique_num,
7217 (curr_state->originator == NULL
7218 ? -1 : curr_state->originator->unique_num),
7219 curr_state->cost,
7220 curr_state->before_nops_num, curr_state->after_nops_num,
7221 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7222 (ia64_tune == PROCESSOR_ITANIUM
7223 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7224 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7225 INSN_UID (insn));
7227 /* Find the position in the current bundle window. The window can
7228 contain at most two bundles. A two-bundle window means that
7229 the processor will make two bundle rotations. */
7230 max_pos = get_max_pos (curr_state->dfa_state);
7231 if (max_pos == 6
7232 /* The following (negative template number) means that the
7233 processor did one bundle rotation. */
7234 || (max_pos == 3 && template0 < 0))
7236 /* We are at the end of the window -- find template(s) for
7237 its bundle(s). */
7238 pos = max_pos;
7239 if (max_pos == 3)
7240 template0 = get_template (curr_state->dfa_state, 3);
7241 else
7243 template1 = get_template (curr_state->dfa_state, 3);
7244 template0 = get_template (curr_state->dfa_state, 6);
7247 if (max_pos > 3 && template1 < 0)
7248 /* It may happen when we have the stop inside a bundle. */
7250 gcc_assert (pos <= 3);
7251 template1 = get_template (curr_state->dfa_state, 3);
7252 pos += 3;
7254 if (!asm_p)
7255 /* Emit nops after the current insn. */
7256 for (i = 0; i < curr_state->after_nops_num; i++)
7258 nop = gen_nop ();
7259 emit_insn_after (nop, insn);
7260 pos--;
7261 gcc_assert (pos >= 0);
7262 if (pos % 3 == 0)
7264 /* We are at the start of a bundle: emit the template
7265 (it should be defined). */
7266 gcc_assert (template0 >= 0);
7267 b = gen_bundle_selector (GEN_INT (template0));
7268 ia64_emit_insn_before (b, nop);
7269 /* If we have a two-bundle window, we make one bundle
7270 rotation. Otherwise template0 will be undefined
7271 (a negative value). */
7272 template0 = template1;
7273 template1 = -1;
7276 /* Move the position backward in the window. A group barrier has
7277 no slot. An asm insn takes a whole bundle. */
7278 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7279 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7280 && asm_noperands (PATTERN (insn)) < 0)
7281 pos--;
7282 /* Long insn takes 2 slots. */
7283 if (ia64_safe_type (insn) == TYPE_L)
7284 pos--;
7285 gcc_assert (pos >= 0);
7286 if (pos % 3 == 0
7287 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7288 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7289 && asm_noperands (PATTERN (insn)) < 0)
7291 /* The current insn is at the bundle start: emit the
7292 template. */
7293 gcc_assert (template0 >= 0);
7294 b = gen_bundle_selector (GEN_INT (template0));
7295 ia64_emit_insn_before (b, insn);
7296 b = PREV_INSN (insn);
7297 insn = b;
7298 /* See comment above in analogous place for emitting nops
7299 after the insn. */
7300 template0 = template1;
7301 template1 = -1;
7303 /* Emit nops before the current insn. */
7304 for (i = 0; i < curr_state->before_nops_num; i++)
7306 nop = gen_nop ();
7307 ia64_emit_insn_before (nop, insn);
7308 nop = PREV_INSN (insn);
7309 insn = nop;
7310 pos--;
7311 gcc_assert (pos >= 0);
7312 if (pos % 3 == 0)
7314 /* See comment above in analogous place for emitting nops
7315 after the insn. */
7316 gcc_assert (template0 >= 0);
7317 b = gen_bundle_selector (GEN_INT (template0));
7318 ia64_emit_insn_before (b, insn);
7319 b = PREV_INSN (insn);
7320 insn = b;
7321 template0 = template1;
7322 template1 = -1;
7326 if (ia64_tune == PROCESSOR_ITANIUM)
7327 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
7328 Itanium1 has a strange design: if the distance between an insn
7329 and a dependent MM-insn is less than 4 cycles, we get an additional
7330 6-cycle stall. So we make the distance equal to 4 cycles if it
7331 is less. */
7332 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7333 insn != NULL_RTX;
7334 insn = next_insn)
7336 gcc_assert (INSN_P (insn)
7337 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7338 && GET_CODE (PATTERN (insn)) != USE
7339 && GET_CODE (PATTERN (insn)) != CLOBBER);
7340 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7341 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
7342 /* We found a MM-insn which needs additional cycles. */
7344 rtx last;
7345 int i, j, n;
7346 int pred_stop_p;
7348 /* Now we are searching for the template of the bundle in
7349 which the MM-insn is placed and for the position of the
7350 insn in the bundle (0, 1, 2). We also check whether there
7351 is a stop before the insn. */
7352 last = prev_active_insn (insn);
7353 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
7354 if (pred_stop_p)
7355 last = prev_active_insn (last);
7356 n = 0;
7357 for (;; last = prev_active_insn (last))
7358 if (recog_memoized (last) == CODE_FOR_bundle_selector)
7360 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
7361 if (template0 == 9)
7362 /* The insn is in an MLX bundle. Change the template
7363 to MFI because we will add nops before the
7364 insn. This simplifies the subsequent code a lot. */
7365 PATTERN (last)
7366 = gen_bundle_selector (const2_rtx); /* -> MFI */
7367 break;
7369 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
7370 && (ia64_safe_itanium_class (last)
7371 != ITANIUM_CLASS_IGNORE))
7372 n++;
7373 /* Some correctness checks: the stop is not at the
7374 bundle start, there are no more than 3 insns in the bundle,
7375 and the MM-insn is not at the start of a bundle with
7376 template MLX. */
7377 gcc_assert ((!pred_stop_p || n)
7378 && n <= 2
7379 && (template0 != 9 || !n));
7380 /* Put nops after the insn in the bundle. */
7381 for (j = 3 - n; j > 0; j --)
7382 ia64_emit_insn_before (gen_nop (), insn);
7383 /* This takes into account that we will add N more nops
7384 before the insn later -- please see the code below. */
7385 add_cycles [INSN_UID (insn)]--;
7386 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
7387 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7388 insn);
7389 if (pred_stop_p)
7390 add_cycles [INSN_UID (insn)]--;
7391 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7393 /* Insert "MII;" template. */
7394 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7395 insn);
7396 ia64_emit_insn_before (gen_nop (), insn);
7397 ia64_emit_insn_before (gen_nop (), insn);
7398 if (i > 1)
7400 /* To decrease code size, we use "MI;I;"
7401 template. */
7402 ia64_emit_insn_before
7403 (gen_insn_group_barrier (GEN_INT (3)), insn);
7404 i--;
7406 ia64_emit_insn_before (gen_nop (), insn);
7407 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7408 insn);
7410 /* Put the MM-insn in the same slot of a bundle with the
7411 same template as the original one. */
7412 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
7413 insn);
7414 /* To put the insn in the same slot, add necessary number
7415 of nops. */
7416 for (j = n; j > 0; j --)
7417 ia64_emit_insn_before (gen_nop (), insn);
7418 /* Put the stop if the original bundle had it. */
7419 if (pred_stop_p)
7420 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7421 insn);
7424 free (index_to_bundle_states);
7425 finish_bundle_state_table ();
7426 bundling_p = 0;
7427 dfa_clean_insn_cache ();
7430 /* The following function is called at the end of scheduling BB or
7431 EBB. After reload, it inserts stop bits and does insn bundling. */
7433 static void
7434 ia64_sched_finish (FILE *dump, int sched_verbose)
7436 if (sched_verbose)
7437 fprintf (dump, "// Finishing schedule.\n");
7438 if (!reload_completed)
7439 return;
7440 if (reload_completed)
7442 final_emit_insn_group_barriers (dump);
7443 bundling (dump, sched_verbose, current_sched_info->prev_head,
7444 current_sched_info->next_tail);
7445 if (sched_verbose && dump)
7446 fprintf (dump, "// finishing %d-%d\n",
7447 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7448 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7450 return;
7454 /* The following function inserts stop bits in scheduled BB or EBB. */
7456 static void
7457 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7459 rtx insn;
7460 int need_barrier_p = 0;
7461 rtx prev_insn = NULL_RTX;
7463 init_insn_group_barriers ();
7465 for (insn = NEXT_INSN (current_sched_info->prev_head);
7466 insn != current_sched_info->next_tail;
7467 insn = NEXT_INSN (insn))
7469 if (GET_CODE (insn) == BARRIER)
7471 rtx last = prev_active_insn (insn);
7473 if (! last)
7474 continue;
7475 if (GET_CODE (last) == JUMP_INSN
7476 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7477 last = prev_active_insn (last);
7478 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7479 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7481 init_insn_group_barriers ();
7482 need_barrier_p = 0;
7483 prev_insn = NULL_RTX;
7485 else if (INSN_P (insn))
7487 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7489 init_insn_group_barriers ();
7490 need_barrier_p = 0;
7491 prev_insn = NULL_RTX;
7493 else if (need_barrier_p || group_barrier_needed (insn))
7495 if (TARGET_EARLY_STOP_BITS)
7497 rtx last;
7499 for (last = insn;
7500 last != current_sched_info->prev_head;
7501 last = PREV_INSN (last))
7502 if (INSN_P (last) && GET_MODE (last) == TImode
7503 && stops_p [INSN_UID (last)])
7504 break;
7505 if (last == current_sched_info->prev_head)
7506 last = insn;
7507 last = prev_active_insn (last);
7508 if (last
7509 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7510 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7511 last);
7512 init_insn_group_barriers ();
7513 for (last = NEXT_INSN (last);
7514 last != insn;
7515 last = NEXT_INSN (last))
7516 if (INSN_P (last))
7517 group_barrier_needed (last);
7519 else
7521 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7522 insn);
7523 init_insn_group_barriers ();
7525 group_barrier_needed (insn);
7526 prev_insn = NULL_RTX;
7528 else if (recog_memoized (insn) >= 0)
7529 prev_insn = insn;
7530 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7531 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7532 || asm_noperands (PATTERN (insn)) >= 0);
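/* Illustration added by the editor (not part of the original source):
   each insn_group_barrier inserted above prints as a stop bit in the
   assembly output, e.g. roughly

	ld8  r14 = [r32]
	;;		// stop bit: the next group may now read r14
	add  r15 = r14, r33

   Exactly where the stop goes depends on the DFA state maintained by
   init_insn_group_barriers and group_barrier_needed.  */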
7539 /* The following function returns the number of insns the DFA insn
7540 scheduler may look ahead during first-cycle multipass scheduling.  */
7542 static int
7543 ia64_first_cycle_multipass_dfa_lookahead (void)
7545 return (reload_completed ? 6 : 4);
7548 /* The following function initializes the variable `dfa_pre_cycle_insn'.  */
7550 static void
7551 ia64_init_dfa_pre_cycle_insn (void)
7553 if (temp_dfa_state == NULL)
7555 dfa_state_size = state_size ();
7556 temp_dfa_state = xmalloc (dfa_state_size);
7557 prev_cycle_state = xmalloc (dfa_state_size);
7559 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7560 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7561 recog_memoized (dfa_pre_cycle_insn);
7562 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7563 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7564 recog_memoized (dfa_stop_insn);
7567 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7568 used by the DFA insn scheduler. */
7570 static rtx
7571 ia64_dfa_pre_cycle_insn (void)
7573 return dfa_pre_cycle_insn;
7576 /* The following function returns TRUE if PRODUCER (of type ilog or
7577 ld) produces the address for CONSUMER (of type st or stf).  */
7580 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7582 rtx dest, reg, mem;
7584 gcc_assert (producer && consumer);
7585 dest = ia64_single_set (producer);
7586 gcc_assert (dest);
7587 reg = SET_DEST (dest);
7588 gcc_assert (reg);
7589 if (GET_CODE (reg) == SUBREG)
7590 reg = SUBREG_REG (reg);
7591 gcc_assert (GET_CODE (reg) == REG);
7593 dest = ia64_single_set (consumer);
7594 gcc_assert (dest);
7595 mem = SET_DEST (dest);
7596 gcc_assert (mem && GET_CODE (mem) == MEM);
7597 return reg_mentioned_p (reg, mem);
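/* Editor's sketch of a producer/consumer pair accepted by the bypass
   check above (register numbers are illustrative only):

	(set (reg:DI 14) (plus:DI (reg:DI 32) (reg:DI 33)))   <- ilog producer
	(set (mem:DI (reg:DI 14)) (reg:DI 35))                <- st consumer

   i.e. the register written by the integer insn is mentioned in the
   address of the store's MEM destination.  */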
7600 /* The following function returns TRUE if PRODUCER (of type ilog or
7601 ld) produces the address for CONSUMER (of type ld or fld).  */
7604 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7606 rtx dest, src, reg, mem;
7608 gcc_assert (producer && consumer);
7609 dest = ia64_single_set (producer);
7610 gcc_assert (dest);
7611 reg = SET_DEST (dest);
7612 gcc_assert (reg);
7613 if (GET_CODE (reg) == SUBREG)
7614 reg = SUBREG_REG (reg);
7615 gcc_assert (GET_CODE (reg) == REG);
7617 src = ia64_single_set (consumer);
7618 gcc_assert (src);
7619 mem = SET_SRC (src);
7620 gcc_assert (mem);
7621 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7622 mem = XVECEXP (mem, 0, 0);
7623 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7624 mem = XEXP (mem, 0);
7626 /* Note that LO_SUM is used for GOT loads. */
7627 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
7629 return reg_mentioned_p (reg, mem);
7632 /* The following function returns TRUE if INSN produces the address
7633 for a load/store insn.  We place such insns into an M slot because
7634 that decreases their latency.  */
7637 ia64_produce_address_p (rtx insn)
7639 return insn->call;
7643 /* Emit pseudo-ops for the assembler to describe predicate relations.
7644 At present this assumes that we only consider predicate pairs to
7645 be mutex, and that the assembler can deduce proper values from
7646 straight-line code. */
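/* Editor's note (illustrative, not from the original source): the
   pred_rel_mutex insns emitted by the function below become assembler
   directives along the lines of

	.pred.rel.mutex p6, p7

   at each code label, declaring the two predicates of a compare pair
   mutually exclusive so the assembler's dependency checking accepts
   the predicated code that follows.  */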
7648 static void
7649 emit_predicate_relation_info (void)
7651 basic_block bb;
7653 FOR_EACH_BB_REVERSE (bb)
7655 int r;
7656 rtx head = BB_HEAD (bb);
7658 /* We only need such notes at code labels. */
7659 if (GET_CODE (head) != CODE_LABEL)
7660 continue;
7661 if (GET_CODE (NEXT_INSN (head)) == NOTE
7662 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7663 head = NEXT_INSN (head);
7665 /* Skip p0, which may be thought to be live due to (reg:DI p0)
7666 grabbing the entire block of predicate registers. */
7667 for (r = PR_REG (2); r < PR_REG (64); r += 2)
7668 if (REGNO_REG_SET_P (bb->il.rtl->global_live_at_start, r))
7670 rtx p = gen_rtx_REG (BImode, r);
7671 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7672 if (head == BB_END (bb))
7673 BB_END (bb) = n;
7674 head = n;
7678 /* Look for conditional calls that do not return, and protect predicate
7679 relations around them. Otherwise the assembler will assume the call
7680 returns, and complain about uses of call-clobbered predicates after
7681 the call. */
7682 FOR_EACH_BB_REVERSE (bb)
7684 rtx insn = BB_HEAD (bb);
7686 while (1)
7688 if (GET_CODE (insn) == CALL_INSN
7689 && GET_CODE (PATTERN (insn)) == COND_EXEC
7690 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7692 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7693 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7694 if (BB_HEAD (bb) == insn)
7695 BB_HEAD (bb) = b;
7696 if (BB_END (bb) == insn)
7697 BB_END (bb) = a;
7700 if (insn == BB_END (bb))
7701 break;
7702 insn = NEXT_INSN (insn);
7707 /* Perform machine dependent operations on the rtl chain INSNS. */
7709 static void
7710 ia64_reorg (void)
7712 /* We are freeing block_for_insn in the toplev to keep compatibility
7713 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7714 compute_bb_for_insn ();
7716 /* If optimizing, we'll have split before scheduling. */
7717 if (optimize == 0)
7718 split_all_insns (0);
7720 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7721 non-optimizing bootstrap. */
7722 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7724 if (optimize && ia64_flag_schedule_insns2)
7726 timevar_push (TV_SCHED2);
7727 ia64_final_schedule = 1;
7729 initiate_bundle_states ();
7730 ia64_nop = make_insn_raw (gen_nop ());
7731 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7732 recog_memoized (ia64_nop);
7733 clocks_length = get_max_uid () + 1;
7734 stops_p = xcalloc (1, clocks_length);
7735 if (ia64_tune == PROCESSOR_ITANIUM)
7737 clocks = xcalloc (clocks_length, sizeof (int));
7738 add_cycles = xcalloc (clocks_length, sizeof (int));
7740 if (ia64_tune == PROCESSOR_ITANIUM2)
7742 pos_1 = get_cpu_unit_code ("2_1");
7743 pos_2 = get_cpu_unit_code ("2_2");
7744 pos_3 = get_cpu_unit_code ("2_3");
7745 pos_4 = get_cpu_unit_code ("2_4");
7746 pos_5 = get_cpu_unit_code ("2_5");
7747 pos_6 = get_cpu_unit_code ("2_6");
7748 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7749 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7750 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7751 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7752 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7753 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7754 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7755 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7756 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7757 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7758 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7759 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7760 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7761 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7762 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7763 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7764 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7765 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7766 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7767 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7769 else
7771 pos_1 = get_cpu_unit_code ("1_1");
7772 pos_2 = get_cpu_unit_code ("1_2");
7773 pos_3 = get_cpu_unit_code ("1_3");
7774 pos_4 = get_cpu_unit_code ("1_4");
7775 pos_5 = get_cpu_unit_code ("1_5");
7776 pos_6 = get_cpu_unit_code ("1_6");
7777 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7778 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7779 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7780 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7781 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7782 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7783 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7784 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7785 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7786 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7787 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7788 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7789 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7790 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7791 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7792 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7793 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7794 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7795 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7796 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7798 schedule_ebbs (dump_file);
7799 finish_bundle_states ();
7800 if (ia64_tune == PROCESSOR_ITANIUM)
7802 free (add_cycles);
7803 free (clocks);
7805 free (stops_p);
7806 emit_insn_group_barriers (dump_file);
7808 ia64_final_schedule = 0;
7809 timevar_pop (TV_SCHED2);
7811 else
7812 emit_all_insn_group_barriers (dump_file);
7814 /* A call must not be the last instruction in a function: the return
7815 address must still lie within the function so that unwinding works
7816 properly.  Note that IA-64 differs from dwarf2 on this point.  */
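/* Editor's sketch of the resulting code (approximate): if the function
   would otherwise end with a call, the block below appends roughly

	br.call.sptk.many b0 = foo#	// the original trailing call
	;;
	break.f 0
	;;

   where "foo" stands for whatever the call target was.  */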
7817 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7819 rtx insn;
7820 int saw_stop = 0;
7822 insn = get_last_insn ();
7823 if (! INSN_P (insn))
7824 insn = prev_active_insn (insn);
7825 /* Skip over insns that expand to nothing. */
7826 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7828 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7829 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7830 saw_stop = 1;
7831 insn = prev_active_insn (insn);
7833 if (GET_CODE (insn) == CALL_INSN)
7835 if (! saw_stop)
7836 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7837 emit_insn (gen_break_f ());
7838 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7842 emit_predicate_relation_info ();
7844 if (ia64_flag_var_tracking)
7846 timevar_push (TV_VAR_TRACKING);
7847 variable_tracking_main ();
7848 timevar_pop (TV_VAR_TRACKING);
7852 /* Return true if REGNO is used by the epilogue. */
7855 ia64_epilogue_uses (int regno)
7857 switch (regno)
7859 case R_GR (1):
7860 /* With a call to a function in another module, we will write a new
7861 value to "gp". After returning from such a call, we need to make
7862 sure the function restores the original gp-value, even if the
7863 function itself does not use the gp anymore. */
7864 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7866 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7867 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7868 /* For functions defined with the syscall_linkage attribute, all
7869 input registers are marked as live at all function exits. This
7870 prevents the register allocator from using the input registers,
7871 which in turn makes it possible to restart a system call after
7872 an interrupt without having to save/restore the input registers.
7873 This also prevents kernel data from leaking to application code. */
7874 return lookup_attribute ("syscall_linkage",
7875 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7877 case R_BR (0):
7878 /* Conditional return patterns can't represent the use of `b0' as
7879 the return address, so we force the value live this way. */
7880 return 1;
7882 case AR_PFS_REGNUM:
7883 /* Likewise for ar.pfs, which is used by br.ret. */
7884 return 1;
7886 default:
7887 return 0;
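/* Illustration for the syscall_linkage case above, added by the editor
   (hypothetical declaration, not part of this file).  */
#if 0
extern long do_syscall (long nr, long arg0, long arg1)
     __attribute__ ((syscall_linkage));	/* keeps in0-in7 live at all exits
					   of the function's definition */
#endif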
7891 /* Return true if REGNO is used by the frame unwinder. */
7894 ia64_eh_uses (int regno)
7896 if (! reload_completed)
7897 return 0;
7899 if (current_frame_info.reg_save_b0
7900 && regno == current_frame_info.reg_save_b0)
7901 return 1;
7902 if (current_frame_info.reg_save_pr
7903 && regno == current_frame_info.reg_save_pr)
7904 return 1;
7905 if (current_frame_info.reg_save_ar_pfs
7906 && regno == current_frame_info.reg_save_ar_pfs)
7907 return 1;
7908 if (current_frame_info.reg_save_ar_unat
7909 && regno == current_frame_info.reg_save_ar_unat)
7910 return 1;
7911 if (current_frame_info.reg_save_ar_lc
7912 && regno == current_frame_info.reg_save_ar_lc)
7913 return 1;
7915 return 0;
7918 /* Return true if this goes in small data/bss. */
7920 /* ??? We could also support long data of our own here, generating
7921 movl/add/ld8 instead of addl,ld8/ld8.  That makes the code bigger, but
7922 should make it faster because there is one less load.  It would also
7923 cover incomplete types, which can't go in sdata/sbss.  */
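/* Editor's sketch of the two access sequences referred to above
   (approximate assembly; "var" is a placeholder symbol):

	// small data: one load, gp-relative
	addl r14 = @gprel(var), r1
	;;
	ld8  r15 = [r14]

	// ordinary data: address fetched from the GOT first, two loads
	addl r14 = @ltoff(var), r1
	;;
	ld8  r14 = [r14]
	;;
	ld8  r15 = [r14]
 */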
7925 static bool
7926 ia64_in_small_data_p (tree exp)
7928 if (TARGET_NO_SDATA)
7929 return false;
7931 /* We want to merge strings, so we never consider them small data. */
7932 if (TREE_CODE (exp) == STRING_CST)
7933 return false;
7935 /* Functions are never small data. */
7936 if (TREE_CODE (exp) == FUNCTION_DECL)
7937 return false;
7939 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7941 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7943 if (strcmp (section, ".sdata") == 0
7944 || strncmp (section, ".sdata.", 7) == 0
7945 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
7946 || strcmp (section, ".sbss") == 0
7947 || strncmp (section, ".sbss.", 6) == 0
7948 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
7949 return true;
7951 else
7953 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7955 /* If this is an incomplete type with size 0, then we can't put it
7956 in sdata because it might be too big when completed. */
7957 if (size > 0 && size <= ia64_section_threshold)
7958 return true;
7961 return false;
7964 /* Output assembly directives for prologue regions. */
7966 /* The current basic block number. */
7968 static bool last_block;
7970 /* True if we need a copy_state command at the start of the next block. */
7972 static bool need_copy_state;
7974 /* The function emits unwind directives for the start of an epilogue. */
7976 static void
7977 process_epilogue (void)
7979 /* If this isn't the last block of the function, then we need to label the
7980 current state, and copy it back in at the start of the next block. */
7982 if (!last_block)
7984 fprintf (asm_out_file, "\t.label_state %d\n",
7985 ++cfun->machine->state_num);
7986 need_copy_state = true;
7989 fprintf (asm_out_file, "\t.restore sp\n");
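/* Editor's illustration: for an epilogue that is not in the last block,
   the directives emitted here pair up with the .copy_state emitted in
   process_for_unwind_directive, roughly as

	.label_state 1
	.restore sp
	 ...			// epilogue and the rest of this block
	.body
	.copy_state 1		// at the start of the next block

   so the unwinder's frame description is restored after the early
   return.  */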
7992 /* This function processes a SET pattern, looking for the specific forms
7993 that require emitting an assembly directive for unwinding.  */
7995 static int
7996 process_set (FILE *asm_out_file, rtx pat)
7998 rtx src = SET_SRC (pat);
7999 rtx dest = SET_DEST (pat);
8000 int src_regno, dest_regno;
8002 /* Look for the ALLOC insn. */
8003 if (GET_CODE (src) == UNSPEC_VOLATILE
8004 && XINT (src, 1) == UNSPECV_ALLOC
8005 && GET_CODE (dest) == REG)
8007 dest_regno = REGNO (dest);
8009 /* If this is the final destination for ar.pfs, then this must
8010 be the alloc in the prologue. */
8011 if (dest_regno == current_frame_info.reg_save_ar_pfs)
8012 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8013 ia64_dbx_register_number (dest_regno));
8014 else
8016 /* This must be an alloc before a sibcall. We must drop the
8017 old frame info. The easiest way to drop the old frame
8018 info is to ensure we had a ".restore sp" directive
8019 followed by a new prologue. If the procedure doesn't
8020 have a memory-stack frame, we'll issue a dummy ".restore
8021 sp" now. */
8022 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8023 /* If we haven't done process_epilogue () yet, do it now.  */
8024 process_epilogue ();
8025 fprintf (asm_out_file, "\t.prologue\n");
8027 return 1;
8030 /* Look for SP = .... */
8031 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8033 if (GET_CODE (src) == PLUS)
8035 rtx op0 = XEXP (src, 0);
8036 rtx op1 = XEXP (src, 1);
8038 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8040 if (INTVAL (op1) < 0)
8041 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8042 -INTVAL (op1));
8043 else
8044 process_epilogue ();
8046 else
8048 gcc_assert (GET_CODE (src) == REG
8049 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8050 process_epilogue ();
8053 return 1;
8056 /* Register move we need to look at. */
8057 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
8059 src_regno = REGNO (src);
8060 dest_regno = REGNO (dest);
8062 switch (src_regno)
8064 case BR_REG (0):
8065 /* Saving return address pointer. */
8066 gcc_assert (dest_regno == current_frame_info.reg_save_b0);
8067 fprintf (asm_out_file, "\t.save rp, r%d\n",
8068 ia64_dbx_register_number (dest_regno));
8069 return 1;
8071 case PR_REG (0):
8072 gcc_assert (dest_regno == current_frame_info.reg_save_pr);
8073 fprintf (asm_out_file, "\t.save pr, r%d\n",
8074 ia64_dbx_register_number (dest_regno));
8075 return 1;
8077 case AR_UNAT_REGNUM:
8078 gcc_assert (dest_regno == current_frame_info.reg_save_ar_unat);
8079 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
8080 ia64_dbx_register_number (dest_regno));
8081 return 1;
8083 case AR_LC_REGNUM:
8084 gcc_assert (dest_regno == current_frame_info.reg_save_ar_lc);
8085 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
8086 ia64_dbx_register_number (dest_regno));
8087 return 1;
8089 case STACK_POINTER_REGNUM:
8090 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
8091 && frame_pointer_needed);
8092 fprintf (asm_out_file, "\t.vframe r%d\n",
8093 ia64_dbx_register_number (dest_regno));
8094 return 1;
8096 default:
8097 /* Everything else should indicate being stored to memory. */
8098 gcc_unreachable ();
8102 /* Memory store we need to look at. */
8103 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
8105 long off;
8106 rtx base;
8107 const char *saveop;
8109 if (GET_CODE (XEXP (dest, 0)) == REG)
8111 base = XEXP (dest, 0);
8112 off = 0;
8114 else
8116 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
8117 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
8118 base = XEXP (XEXP (dest, 0), 0);
8119 off = INTVAL (XEXP (XEXP (dest, 0), 1));
8122 if (base == hard_frame_pointer_rtx)
8124 saveop = ".savepsp";
8125 off = - off;
8127 else
8129 gcc_assert (base == stack_pointer_rtx);
8130 saveop = ".savesp";
8133 src_regno = REGNO (src);
8134 switch (src_regno)
8136 case BR_REG (0):
8137 gcc_assert (!current_frame_info.reg_save_b0);
8138 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
8139 return 1;
8141 case PR_REG (0):
8142 gcc_assert (!current_frame_info.reg_save_pr);
8143 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
8144 return 1;
8146 case AR_LC_REGNUM:
8147 gcc_assert (!current_frame_info.reg_save_ar_lc);
8148 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
8149 return 1;
8151 case AR_PFS_REGNUM:
8152 gcc_assert (!current_frame_info.reg_save_ar_pfs);
8153 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
8154 return 1;
8156 case AR_UNAT_REGNUM:
8157 gcc_assert (!current_frame_info.reg_save_ar_unat);
8158 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
8159 return 1;
8161 case GR_REG (4):
8162 case GR_REG (5):
8163 case GR_REG (6):
8164 case GR_REG (7):
8165 fprintf (asm_out_file, "\t.save.g 0x%x\n",
8166 1 << (src_regno - GR_REG (4)));
8167 return 1;
8169 case BR_REG (1):
8170 case BR_REG (2):
8171 case BR_REG (3):
8172 case BR_REG (4):
8173 case BR_REG (5):
8174 fprintf (asm_out_file, "\t.save.b 0x%x\n",
8175 1 << (src_regno - BR_REG (1)));
8176 return 1;
8178 case FR_REG (2):
8179 case FR_REG (3):
8180 case FR_REG (4):
8181 case FR_REG (5):
8182 fprintf (asm_out_file, "\t.save.f 0x%x\n",
8183 1 << (src_regno - FR_REG (2)));
8184 return 1;
8186 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
8187 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
8188 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
8189 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
8190 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
8191 1 << (src_regno - FR_REG (12)));
8192 return 1;
8194 default:
8195 return 0;
8199 return 0;
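/* Editor's illustration (directive strings taken from the cases above):
   frame-related SETs are translated one-for-one into unwind directives
   such as

	.fframe 528		// sp = sp - 528
	.save rp, r34		// b0 copied into r34
	.save ar.pfs, r35	// ar.pfs copied into r35
	.savesp pr, 16		// predicates stored at [sp + 16]
	.restore sp		// sp restored in the epilogue
 */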
8203 /* This function looks at a single insn and emits any directives
8204 required to unwind this insn. */
8205 void
8206 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
8208 if (flag_unwind_tables
8209 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8211 rtx pat;
8213 if (GET_CODE (insn) == NOTE
8214 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
8216 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
8218 /* Restore unwind state from immediately before the epilogue. */
8219 if (need_copy_state)
8221 fprintf (asm_out_file, "\t.body\n");
8222 fprintf (asm_out_file, "\t.copy_state %d\n",
8223 cfun->machine->state_num);
8224 need_copy_state = false;
8228 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
8229 return;
8231 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8232 if (pat)
8233 pat = XEXP (pat, 0);
8234 else
8235 pat = PATTERN (insn);
8237 switch (GET_CODE (pat))
8239 case SET:
8240 process_set (asm_out_file, pat);
8241 break;
8243 case PARALLEL:
8245 int par_index;
8246 int limit = XVECLEN (pat, 0);
8247 for (par_index = 0; par_index < limit; par_index++)
8249 rtx x = XVECEXP (pat, 0, par_index);
8250 if (GET_CODE (x) == SET)
8251 process_set (asm_out_file, x);
8253 break;
8256 default:
8257 gcc_unreachable ();
8263 enum ia64_builtins
8265 IA64_BUILTIN_BSP,
8266 IA64_BUILTIN_FLUSHRS
8269 void
8270 ia64_init_builtins (void)
8272 tree fpreg_type;
8273 tree float80_type;
8275 /* The __fpreg type. */
8276 fpreg_type = make_node (REAL_TYPE);
8277 TYPE_PRECISION (fpreg_type) = 82;
8278 layout_type (fpreg_type);
8279 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
8281 /* The __float80 type. */
8282 float80_type = make_node (REAL_TYPE);
8283 TYPE_PRECISION (float80_type) = 80;
8284 layout_type (float80_type);
8285 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
8287 /* The __float128 type. */
8288 if (!TARGET_HPUX)
8290 tree float128_type = make_node (REAL_TYPE);
8291 TYPE_PRECISION (float128_type) = 128;
8292 layout_type (float128_type);
8293 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
8295 else
8296 /* Under HPUX, this is a synonym for "long double". */
8297 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
8298 "__float128");
8300 #define def_builtin(name, type, code) \
8301 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
8302 NULL, NULL_TREE)
8304 def_builtin ("__builtin_ia64_bsp",
8305 build_function_type (ptr_type_node, void_list_node),
8306 IA64_BUILTIN_BSP);
8308 def_builtin ("__builtin_ia64_flushrs",
8309 build_function_type (void_type_node, void_list_node),
8310 IA64_BUILTIN_FLUSHRS);
8312 #undef def_builtin
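/* Usage sketch added by the editor (hypothetical user code, not part of
   this file): the two builtins registered above could be used like so.  */
#if 0
static void *
backing_store_top (void)
{
  /* Flush the dirty stacked registers out to the backing store...  */
  __builtin_ia64_flushrs ();
  /* ...and return the current backing store pointer (ar.bsp).  */
  return __builtin_ia64_bsp ();
}
#endif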
8316 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8317 enum machine_mode mode ATTRIBUTE_UNUSED,
8318 int ignore ATTRIBUTE_UNUSED)
8320 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8321 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8323 switch (fcode)
8325 case IA64_BUILTIN_BSP:
8326 if (! target || ! register_operand (target, DImode))
8327 target = gen_reg_rtx (DImode);
8328 emit_insn (gen_bsp_value (target));
8329 #ifdef POINTERS_EXTEND_UNSIGNED
8330 target = convert_memory_address (ptr_mode, target);
8331 #endif
8332 return target;
8334 case IA64_BUILTIN_FLUSHRS:
8335 emit_insn (gen_flushrs ());
8336 return const0_rtx;
8338 default:
8339 break;
8342 return NULL_RTX;
8345 /* On HP-UX IA64, aggregate parameters are passed in the most
8346 significant bits of the stack slot.  */
8348 enum direction
8349 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8351 /* Exception to normal case for structures/unions/etc. */
8353 if (type && AGGREGATE_TYPE_P (type)
8354 && int_size_in_bytes (type) < UNITS_PER_WORD)
8355 return upward;
8357 /* Fall back to the default. */
8358 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
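/* Editor's example of the rule above (illustrative): a small aggregate
   such as

	struct tiny { short x; };

   passed by value on HP-UX occupies the most significant bits of its
   8-byte slot (padding "upward"), while other arguments keep the
   default padding computed by DEFAULT_FUNCTION_ARG_PADDING.  */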
8361 /* Linked list of all external functions that are to be emitted by GCC.
8362 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8363 order to avoid putting out names that are never really used. */
8365 struct extern_func_list GTY(())
8367 struct extern_func_list *next;
8368 tree decl;
8371 static GTY(()) struct extern_func_list *extern_func_head;
8373 static void
8374 ia64_hpux_add_extern_decl (tree decl)
8376 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8378 p->decl = decl;
8379 p->next = extern_func_head;
8380 extern_func_head = p;
8383 /* Print out the list of used global functions. */
8385 static void
8386 ia64_hpux_file_end (void)
8388 struct extern_func_list *p;
8390 for (p = extern_func_head; p; p = p->next)
8392 tree decl = p->decl;
8393 tree id = DECL_ASSEMBLER_NAME (decl);
8395 gcc_assert (id);
8397 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8399 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8401 TREE_ASM_WRITTEN (decl) = 1;
8402 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8403 fputs (TYPE_ASM_OP, asm_out_file);
8404 assemble_name (asm_out_file, name);
8405 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8409 extern_func_head = 0;
8412 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
8413 modes of word_mode and larger.  Rename the TFmode libfuncs using the
8414 HPUX conventions.  __divtf3 is used for XFmode; we need to keep it for
8415 backward compatibility.  */
8417 static void
8418 ia64_init_libfuncs (void)
8420 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8421 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8422 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8423 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8425 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8426 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8427 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8428 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8429 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8431 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8432 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8433 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8434 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8435 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8436 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8438 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8439 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8440 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8441 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8443 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8444 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8445 /* HP-UX 11.23 libc does not have a function for unsigned
8446 SImode-to-TFmode conversion. */
8447 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
8450 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8452 static void
8453 ia64_hpux_init_libfuncs (void)
8455 ia64_init_libfuncs ();
8457 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8458 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8459 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8461 /* ia64_expand_compare uses this. */
8462 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8464 /* These should never be used. */
8465 set_optab_libfunc (eq_optab, TFmode, 0);
8466 set_optab_libfunc (ne_optab, TFmode, 0);
8467 set_optab_libfunc (gt_optab, TFmode, 0);
8468 set_optab_libfunc (ge_optab, TFmode, 0);
8469 set_optab_libfunc (lt_optab, TFmode, 0);
8470 set_optab_libfunc (le_optab, TFmode, 0);
8473 /* Rename the division and modulus functions in VMS. */
8475 static void
8476 ia64_vms_init_libfuncs (void)
8478 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8479 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8480 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8481 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8482 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8483 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8484 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8485 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8488 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8489 the HPUX conventions. */
8491 static void
8492 ia64_sysv4_init_libfuncs (void)
8494 ia64_init_libfuncs ();
8496 /* These functions are not part of the HPUX TFmode interface. We
8497 use them instead of _U_Qfcmp, which doesn't work the way we
8498 expect. */
8499 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8500 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8501 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8502 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8503 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8504 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8506 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8507 glibc doesn't have them. */
8510 /* Return the section to use for X. The only special thing we do here
8511 is to honor small data. */
8513 static section *
8514 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8515 unsigned HOST_WIDE_INT align)
8517 if (GET_MODE_SIZE (mode) > 0
8518 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8519 return sdata_section;
8520 else
8521 return default_elf_select_rtx_section (mode, x, align);
8524 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8525 Pretend flag_pic is always set. */
8527 static section *
8528 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8530 return default_elf_select_section_1 (exp, reloc, align, true);
8533 static void
8534 ia64_rwreloc_unique_section (tree decl, int reloc)
8536 default_unique_section_1 (decl, reloc, true);
8539 static section *
8540 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8541 unsigned HOST_WIDE_INT align)
8543 section *sect;
8544 int save_pic = flag_pic;
8545 flag_pic = 1;
8546 sect = ia64_select_rtx_section (mode, x, align);
8547 flag_pic = save_pic;
8548 return sect;
8551 #ifndef TARGET_RWRELOC
8552 #define TARGET_RWRELOC flag_pic
8553 #endif
8555 static unsigned int
8556 ia64_section_type_flags (tree decl, const char *name, int reloc)
8558 unsigned int flags = 0;
8560 if (strcmp (name, ".sdata") == 0
8561 || strncmp (name, ".sdata.", 7) == 0
8562 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
8563 || strncmp (name, ".sdata2.", 8) == 0
8564 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
8565 || strcmp (name, ".sbss") == 0
8566 || strncmp (name, ".sbss.", 6) == 0
8567 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
8568 flags = SECTION_SMALL;
8570 flags |= default_section_type_flags_1 (decl, name, reloc, TARGET_RWRELOC);
8571 return flags;
8574 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
8575 structure type and the address of that return value should be passed
8576 in out0, rather than in r8.  */
8578 static bool
8579 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8581 tree ret_type = TREE_TYPE (fntype);
8583 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8584 as the structure return address parameter, if the return value
8585 type has a non-trivial copy constructor or destructor. It is not
8586 clear if this same convention should be used for other
8587 programming languages. Until G++ 3.4, we incorrectly used r8 for
8588 these return values. */
8589 return (abi_version_at_least (2)
8590 && ret_type
8591 && TYPE_MODE (ret_type) == BLKmode
8592 && TREE_ADDRESSABLE (ret_type)
8593 && strcmp (lang_hooks.name, "GNU C++") == 0);
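/* Editor's illustration (hypothetical C++, shown here only as a comment):
   for a class with a non-trivial destructor returned by value,

	struct S { ~S (); int i; };
	S f ();

   the Itanium C++ ABI passes the return-slot address in out0, so the
   "this" pointer of a thunk for such a virtual function becomes the
   second parameter (see ia64_output_mi_thunk below).  */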
8596 /* Output the assembler code for a thunk function. THUNK_DECL is the
8597 declaration for the thunk function itself, FUNCTION is the decl for
8598 the target function. DELTA is an immediate constant offset to be
8599 added to THIS. If VCALL_OFFSET is nonzero, the word at
8600 *(*this + vcall_offset) should be added to THIS. */
8602 static void
8603 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8604 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8605 tree function)
8607 rtx this, insn, funexp;
8608 unsigned int this_parmno;
8609 unsigned int this_regno;
8611 reload_completed = 1;
8612 epilogue_completed = 1;
8613 no_new_pseudos = 1;
8614 reset_block_changes ();
8616 /* Set things up as ia64_expand_prologue might. */
8617 last_scratch_gr_reg = 15;
8619 memset (&current_frame_info, 0, sizeof (current_frame_info));
8620 current_frame_info.spill_cfa_off = -16;
8621 current_frame_info.n_input_regs = 1;
8622 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8624 /* Mark the end of the (empty) prologue. */
8625 emit_note (NOTE_INSN_PROLOGUE_END);
8627 /* Figure out whether "this" will be the first parameter (the
8628 typical case) or the second parameter (as happens when the
8629 virtual function returns certain class objects). */
8630 this_parmno
8631 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
8632 ? 1 : 0);
8633 this_regno = IN_REG (this_parmno);
8634 if (!TARGET_REG_NAMES)
8635 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
8637 this = gen_rtx_REG (Pmode, this_regno);
8638 if (TARGET_ILP32)
8640 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
8641 REG_POINTER (tmp) = 1;
8642 if (delta && CONST_OK_FOR_I (delta))
8644 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8645 delta = 0;
8647 else
8648 emit_insn (gen_ptr_extend (this, tmp));
8651 /* Apply the constant offset, if required. */
8652 if (delta)
8654 rtx delta_rtx = GEN_INT (delta);
8656 if (!CONST_OK_FOR_I (delta))
8658 rtx tmp = gen_rtx_REG (Pmode, 2);
8659 emit_move_insn (tmp, delta_rtx);
8660 delta_rtx = tmp;
8662 emit_insn (gen_adddi3 (this, this, delta_rtx));
8665 /* Apply the offset from the vtable, if required. */
8666 if (vcall_offset)
8668 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8669 rtx tmp = gen_rtx_REG (Pmode, 2);
8671 if (TARGET_ILP32)
8673 rtx t = gen_rtx_REG (ptr_mode, 2);
8674 REG_POINTER (t) = 1;
8675 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8676 if (CONST_OK_FOR_I (vcall_offset))
8678 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8679 vcall_offset_rtx));
8680 vcall_offset = 0;
8682 else
8683 emit_insn (gen_ptr_extend (tmp, t));
8685 else
8686 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8688 if (vcall_offset)
8690 if (!CONST_OK_FOR_J (vcall_offset))
8692 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8693 emit_move_insn (tmp2, vcall_offset_rtx);
8694 vcall_offset_rtx = tmp2;
8696 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8699 if (TARGET_ILP32)
8700 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8701 gen_rtx_MEM (ptr_mode, tmp));
8702 else
8703 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8705 emit_insn (gen_adddi3 (this, this, tmp));
8708 /* Generate a tail call to the target function. */
8709 if (! TREE_USED (function))
8711 assemble_external (function);
8712 TREE_USED (function) = 1;
8714 funexp = XEXP (DECL_RTL (function), 0);
8715 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8716 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8717 insn = get_last_insn ();
8718 SIBLING_CALL_P (insn) = 1;
8720 /* Code generation for calls relies on splitting. */
8721 reload_completed = 1;
8722 epilogue_completed = 1;
8723 try_split (PATTERN (insn), insn, 0);
8725 emit_barrier ();
8727 /* Run just enough of rest_of_compilation to get the insns emitted.
8728 There's not really enough bulk here to make other passes such as
8729 instruction scheduling worthwhile.  Note that use_thunk calls
8730 assemble_start_function and assemble_end_function. */
8732 insn_locators_initialize ();
8733 emit_all_insn_group_barriers (NULL);
8734 insn = get_insns ();
8735 shorten_branches (insn);
8736 final_start_function (insn, file, 1);
8737 final (insn, file, 1);
8738 final_end_function ();
8740 reload_completed = 0;
8741 epilogue_completed = 0;
8742 no_new_pseudos = 0;
8745 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
8747 static rtx
8748 ia64_struct_value_rtx (tree fntype,
8749 int incoming ATTRIBUTE_UNUSED)
8751 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
8752 return NULL_RTX;
8753 return gen_rtx_REG (Pmode, GR_REG (8));
8756 static bool
8757 ia64_scalar_mode_supported_p (enum machine_mode mode)
8759 switch (mode)
8761 case QImode:
8762 case HImode:
8763 case SImode:
8764 case DImode:
8765 case TImode:
8766 return true;
8768 case SFmode:
8769 case DFmode:
8770 case XFmode:
8771 case RFmode:
8772 return true;
8774 case TFmode:
8775 return TARGET_HPUX;
8777 default:
8778 return false;
8782 static bool
8783 ia64_vector_mode_supported_p (enum machine_mode mode)
8785 switch (mode)
8787 case V8QImode:
8788 case V4HImode:
8789 case V2SImode:
8790 return true;
8792 case V2SFmode:
8793 return true;
8795 default:
8796 return false;
8800 /* Implement the FUNCTION_PROFILER macro. */
8802 void
8803 ia64_output_function_profiler (FILE *file, int labelno)
8805 bool indirect_call;
8807 /* If the function needs a static chain and the static chain
8808 register is r15, we use an indirect call so as to bypass
8809 the PLT stub in case the executable is dynamically linked,
8810 because the stub clobbers r15 as per 5.3.6 of the psABI.
8811 We don't need to do that in non-canonical PIC mode.  */
8813 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
8815 gcc_assert (STATIC_CHAIN_REGNUM == 15);
8816 indirect_call = true;
8818 else
8819 indirect_call = false;
8821 if (TARGET_GNU_AS)
8822 fputs ("\t.prologue 4, r40\n", file);
8823 else
8824 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
8825 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
8827 if (NO_PROFILE_COUNTERS)
8828 fputs ("\tmov out3 = r0\n", file);
8829 else
8831 char buf[20];
8832 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8834 if (TARGET_AUTO_PIC)
8835 fputs ("\tmovl out3 = @gprel(", file);
8836 else
8837 fputs ("\taddl out3 = @ltoff(", file);
8838 assemble_name (file, buf);
8839 if (TARGET_AUTO_PIC)
8840 fputs (")\n", file);
8841 else
8842 fputs ("), r1\n", file);
8845 if (indirect_call)
8846 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
8847 fputs ("\t;;\n", file);
8849 fputs ("\t.save rp, r42\n", file);
8850 fputs ("\tmov out2 = b0\n", file);
8851 if (indirect_call)
8852 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
8853 fputs ("\t.body\n", file);
8854 fputs ("\tmov out1 = r1\n", file);
8855 if (indirect_call)
8857 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
8858 fputs ("\tmov b6 = r16\n", file);
8859 fputs ("\tld8 r1 = [r14]\n", file);
8860 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
8862 else
8863 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
8866 static GTY(()) rtx mcount_func_rtx;
8867 static rtx
8868 gen_mcount_func_rtx (void)
8870 if (!mcount_func_rtx)
8871 mcount_func_rtx = init_one_libfunc ("_mcount");
8872 return mcount_func_rtx;
8875 void
8876 ia64_profile_hook (int labelno)
8878 rtx label, ip;
8880 if (NO_PROFILE_COUNTERS)
8881 label = const0_rtx;
8882 else
8884 char buf[30];
8885 const char *label_name;
8886 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8887 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
8888 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
8889 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
8891 ip = gen_reg_rtx (Pmode);
8892 emit_insn (gen_ip_value (ip));
8893 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
8894 VOIDmode, 3,
8895 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
8896 ip, Pmode,
8897 label, Pmode);
8900 /* Return the mangling of TYPE if it is an extended fundamental type. */
8902 static const char *
8903 ia64_mangle_fundamental_type (tree type)
8905 /* On HP-UX, "long double" is mangled as "e" so __float128 is
8906 mangled as "e". */
8907 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
8908 return "g";
8909 /* On HP-UX, "e" is not available as a mangling of __float80 so use
8910 an extended mangling. Elsewhere, "e" is available since long
8911 double is 80 bits. */
8912 if (TYPE_MODE (type) == XFmode)
8913 return TARGET_HPUX ? "u9__float80" : "e";
8914 if (TYPE_MODE (type) == RFmode)
8915 return "u7__fpreg";
8916 return NULL;
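/* Editor's examples of the manglings above (assuming the Itanium C++
   mangling scheme): a parameter of type __fpreg mangles as "u7__fpreg",
   __float80 as "e" on Linux but "u9__float80" on HP-UX, and __float128
   as "g" except on HP-UX, where it is plain long double ("e").  So
   "void f (__fpreg)" would typically mangle to "_Z1fu7__fpreg".  */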
8919 /* Return the diagnostic message string if conversion from FROMTYPE to
8920 TOTYPE is not allowed, NULL otherwise. */
8921 static const char *
8922 ia64_invalid_conversion (tree fromtype, tree totype)
8924 /* Reject nontrivial conversion to or from __fpreg. */
8925 if (TYPE_MODE (fromtype) == RFmode
8926 && TYPE_MODE (totype) != RFmode
8927 && TYPE_MODE (totype) != VOIDmode)
8928 return N_("invalid conversion from %<__fpreg%>");
8929 if (TYPE_MODE (totype) == RFmode
8930 && TYPE_MODE (fromtype) != RFmode)
8931 return N_("invalid conversion to %<__fpreg%>");
8932 return NULL;
8935 /* Return the diagnostic message string if the unary operation OP is
8936 not permitted on TYPE, NULL otherwise. */
8937 static const char *
8938 ia64_invalid_unary_op (int op, tree type)
8940 /* Reject operations on __fpreg other than unary + or &. */
8941 if (TYPE_MODE (type) == RFmode
8942 && op != CONVERT_EXPR
8943 && op != ADDR_EXPR)
8944 return N_("invalid operation on %<__fpreg%>");
8945 return NULL;
8948 /* Return the diagnostic message string if the binary operation OP is
8949 not permitted on TYPE1 and TYPE2, NULL otherwise. */
8950 static const char *
8951 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
8953 /* Reject operations on __fpreg. */
8954 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
8955 return N_("invalid operation on %<__fpreg%>");
8956 return NULL;
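/* Editor's sketch of the diagnostics above (hypothetical user code,
   never compiled):  */
#if 0
static void
fpreg_rules (void)
{
  __fpreg a, b;
  a = a + b;		/* error: invalid operation on '__fpreg' */
  (void) (double) a;	/* error: invalid conversion from '__fpreg' */
  (void) &a;		/* OK: unary & is allowed */
}
#endif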
8959 #include "gt-ia64.h"