gcc/config/ia64/ia64.c (official-gcc.git blob 0891ed23cd8f537088e8db09a3299ccf83593b05, commit of 2004-06-08 by Vladimir Makarov <vmakarov@redhat.com>)
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "except.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "toplev.h"
46 #include "sched-int.h"
47 #include "timevar.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "tm_p.h"
51 #include "hashtab.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
55 /* This is used for communication between ASM_OUTPUT_LABEL and
56 ASM_OUTPUT_LABELREF. */
57 int ia64_asm_output_label = 0;
59 /* Define the information needed to generate branch and scc insns. This is
60 stored from the compare operation. */
61 struct rtx_def * ia64_compare_op0;
62 struct rtx_def * ia64_compare_op1;
64 /* Register names for ia64_expand_prologue. */
65 static const char * const ia64_reg_numbers[96] =
66 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
67 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
68 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
69 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
70 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
71 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
72 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
73 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
74 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
75 "r104","r105","r106","r107","r108","r109","r110","r111",
76 "r112","r113","r114","r115","r116","r117","r118","r119",
77 "r120","r121","r122","r123","r124","r125","r126","r127"};
79 /* ??? These strings could be shared with REGISTER_NAMES. */
80 static const char * const ia64_input_reg_names[8] =
81 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
83 /* ??? These strings could be shared with REGISTER_NAMES. */
84 static const char * const ia64_local_reg_names[80] =
85 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
86 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
87 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
88 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
89 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
90 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
91 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
92 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
93 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
94 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
96 /* ??? These strings could be shared with REGISTER_NAMES. */
97 static const char * const ia64_output_reg_names[8] =
98 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
100 /* String used with the -mfixed-range= option. */
101 const char *ia64_fixed_range_string;
103 /* Determines whether we use adds, addl, or movl to generate our
104 TLS immediate offsets. */
105 int ia64_tls_size = 22;
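/* Illustrative mapping, assuming the usual IA-64 immediate widths: a value
   of 14 lets the offset be added with "adds" (14-bit immediate), 22 with
   "addl" (22-bit immediate), and 64 requires materializing the offset with
   "movl".  The default of 22 therefore assumes TLS offsets fit in 22 bits.  */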
107 /* String used with the -mtls-size= option. */
108 const char *ia64_tls_size_string;
 110 /* Which cpu we are scheduling for. */
111 enum processor_type ia64_tune;
 113 /* String used with the -mtune= option. */
114 const char *ia64_tune_string;
116 /* Determines whether we run our final scheduling pass or not. We always
117 avoid the normal second scheduling pass. */
118 static int ia64_flag_schedule_insns2;
120 /* Determines whether we run variable tracking in machine dependent
121 reorganization. */
122 static int ia64_flag_var_tracking;
124 /* Variables which are this size or smaller are put in the sdata/sbss
125 sections. */
127 unsigned int ia64_section_threshold;
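/* For illustration, with the usual threshold of 8 bytes (adjustable via -G):
   a 4-byte "int" would be placed in .sdata/.sbss and reached gp-relative with
   a single add, while a 64-byte array would stay in the ordinary sections.  */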
129 /* The following variable is used by the DFA insn scheduler. The value is
130 TRUE if we do insn bundling instead of insn scheduling. */
131 int bundling_p = 0;
133 /* Structure to be filled in by ia64_compute_frame_size with register
134 save masks and offsets for the current function. */
136 struct ia64_frame_info
138 HOST_WIDE_INT total_size; /* size of the stack frame, not including
139 the caller's scratch area. */
140 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
141 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
142 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
143 HARD_REG_SET mask; /* mask of saved registers. */
144 unsigned int gr_used_mask; /* mask of registers in use as gr spill
145 registers or long-term scratches. */
146 int n_spilled; /* number of spilled registers. */
147 int reg_fp; /* register for fp. */
148 int reg_save_b0; /* save register for b0. */
149 int reg_save_pr; /* save register for prs. */
150 int reg_save_ar_pfs; /* save register for ar.pfs. */
151 int reg_save_ar_unat; /* save register for ar.unat. */
152 int reg_save_ar_lc; /* save register for ar.lc. */
153 int reg_save_gp; /* save register for gp. */
154 int n_input_regs; /* number of input registers used. */
155 int n_local_regs; /* number of local registers used. */
156 int n_output_regs; /* number of output registers used. */
157 int n_rotate_regs; /* number of rotating registers used. */
159 char need_regstk; /* true if a .regstk directive needed. */
160 char initialized; /* true if the data is finalized. */
163 /* Current frame information calculated by ia64_compute_frame_size. */
164 static struct ia64_frame_info current_frame_info;
166 static int ia64_use_dfa_pipeline_interface (void);
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
173 static rtx gen_tls_get_addr (void);
174 static rtx gen_thread_pointer (void);
175 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
176 static int find_gr_spill (int);
177 static int next_scratch_gr_reg (void);
178 static void mark_reg_gr_used_mask (rtx, void *);
179 static void ia64_compute_frame_size (HOST_WIDE_INT);
180 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
181 static void finish_spill_pointers (void);
182 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
183 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
184 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
185 static rtx gen_movdi_x (rtx, rtx, rtx);
186 static rtx gen_fr_spill_x (rtx, rtx, rtx);
187 static rtx gen_fr_restore_x (rtx, rtx, rtx);
189 static enum machine_mode hfa_element_mode (tree, int);
190 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
191 tree, int *, int);
192 static bool ia64_function_ok_for_sibcall (tree, tree);
193 static bool ia64_return_in_memory (tree, tree);
194 static bool ia64_rtx_costs (rtx, int, int, int *);
195 static void fix_range (const char *);
196 static struct machine_function * ia64_init_machine_status (void);
197 static void emit_insn_group_barriers (FILE *);
198 static void emit_all_insn_group_barriers (FILE *);
199 static void final_emit_insn_group_barriers (FILE *);
200 static void emit_predicate_relation_info (void);
201 static void ia64_reorg (void);
202 static bool ia64_in_small_data_p (tree);
203 static void process_epilogue (void);
204 static int process_set (FILE *, rtx);
206 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
207 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
208 static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
209 int, tree, rtx);
210 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
211 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
212 static bool ia64_assemble_integer (rtx, unsigned int, int);
213 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
214 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
215 static void ia64_output_function_end_prologue (FILE *);
217 static int ia64_issue_rate (void);
218 static int ia64_adjust_cost (rtx, rtx, rtx, int);
219 static void ia64_sched_init (FILE *, int, int);
220 static void ia64_sched_finish (FILE *, int);
221 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
222 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
223 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
224 static int ia64_variable_issue (FILE *, int, rtx, int);
226 static struct bundle_state *get_free_bundle_state (void);
227 static void free_bundle_state (struct bundle_state *);
228 static void initiate_bundle_states (void);
229 static void finish_bundle_states (void);
230 static unsigned bundle_state_hash (const void *);
231 static int bundle_state_eq_p (const void *, const void *);
232 static int insert_bundle_state (struct bundle_state *);
233 static void initiate_bundle_state_table (void);
234 static void finish_bundle_state_table (void);
235 static int try_issue_nops (struct bundle_state *, int);
236 static int try_issue_insn (struct bundle_state *, rtx);
237 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
238 static int get_max_pos (state_t);
239 static int get_template (state_t, int);
241 static rtx get_next_important_insn (rtx, rtx);
242 static void bundling (FILE *, int, rtx, rtx);
244 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
245 HOST_WIDE_INT, tree);
246 static void ia64_file_start (void);
248 static void ia64_select_rtx_section (enum machine_mode, rtx,
249 unsigned HOST_WIDE_INT);
250 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
251 ATTRIBUTE_UNUSED;
252 static void ia64_rwreloc_unique_section (tree, int)
253 ATTRIBUTE_UNUSED;
254 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
255 unsigned HOST_WIDE_INT)
256 ATTRIBUTE_UNUSED;
257 static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
258 ATTRIBUTE_UNUSED;
260 static void ia64_hpux_add_extern_decl (tree decl)
261 ATTRIBUTE_UNUSED;
262 static void ia64_hpux_file_end (void)
263 ATTRIBUTE_UNUSED;
264 static void ia64_init_libfuncs (void)
265 ATTRIBUTE_UNUSED;
266 static void ia64_hpux_init_libfuncs (void)
267 ATTRIBUTE_UNUSED;
268 static void ia64_sysv4_init_libfuncs (void)
269 ATTRIBUTE_UNUSED;
270 static void ia64_vms_init_libfuncs (void)
271 ATTRIBUTE_UNUSED;
273 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
274 static void ia64_encode_section_info (tree, rtx, int);
275 static rtx ia64_struct_value_rtx (tree, int);
278 /* Table of valid machine attributes. */
279 static const struct attribute_spec ia64_attribute_table[] =
281 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
282 { "syscall_linkage", 0, 0, false, true, true, NULL },
283 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
284 { NULL, 0, 0, false, false, false, NULL }
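/* Hedged usage sketch (the declarations here are only examples):
       extern int counter __attribute__ ((model ("small")));
       extern void sys_entry (void) __attribute__ ((syscall_linkage));
   "model" takes exactly one argument and is handled by
   ia64_handle_model_attribute below; "syscall_linkage" takes none and
   applies to function types.  */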
287 /* Initialize the GCC target structure. */
288 #undef TARGET_ATTRIBUTE_TABLE
289 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
291 #undef TARGET_INIT_BUILTINS
292 #define TARGET_INIT_BUILTINS ia64_init_builtins
294 #undef TARGET_EXPAND_BUILTIN
295 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
297 #undef TARGET_ASM_BYTE_OP
298 #define TARGET_ASM_BYTE_OP "\tdata1\t"
299 #undef TARGET_ASM_ALIGNED_HI_OP
300 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
301 #undef TARGET_ASM_ALIGNED_SI_OP
302 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
303 #undef TARGET_ASM_ALIGNED_DI_OP
304 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
305 #undef TARGET_ASM_UNALIGNED_HI_OP
306 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
307 #undef TARGET_ASM_UNALIGNED_SI_OP
308 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
309 #undef TARGET_ASM_UNALIGNED_DI_OP
310 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
311 #undef TARGET_ASM_INTEGER
312 #define TARGET_ASM_INTEGER ia64_assemble_integer
314 #undef TARGET_ASM_FUNCTION_PROLOGUE
315 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
316 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
317 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
318 #undef TARGET_ASM_FUNCTION_EPILOGUE
319 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
321 #undef TARGET_IN_SMALL_DATA_P
322 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
324 #undef TARGET_SCHED_ADJUST_COST
325 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
326 #undef TARGET_SCHED_ISSUE_RATE
327 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
328 #undef TARGET_SCHED_VARIABLE_ISSUE
329 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
330 #undef TARGET_SCHED_INIT
331 #define TARGET_SCHED_INIT ia64_sched_init
332 #undef TARGET_SCHED_FINISH
333 #define TARGET_SCHED_FINISH ia64_sched_finish
334 #undef TARGET_SCHED_REORDER
335 #define TARGET_SCHED_REORDER ia64_sched_reorder
336 #undef TARGET_SCHED_REORDER2
337 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
339 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
340 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
342 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
343 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE ia64_use_dfa_pipeline_interface
345 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
346 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
348 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
349 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
350 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
351 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
353 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
354 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
355 ia64_first_cycle_multipass_dfa_lookahead_guard
357 #undef TARGET_SCHED_DFA_NEW_CYCLE
358 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
360 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
361 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
363 #undef TARGET_ASM_OUTPUT_MI_THUNK
364 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
365 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
366 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
368 #undef TARGET_ASM_FILE_START
369 #define TARGET_ASM_FILE_START ia64_file_start
371 #undef TARGET_RTX_COSTS
372 #define TARGET_RTX_COSTS ia64_rtx_costs
373 #undef TARGET_ADDRESS_COST
374 #define TARGET_ADDRESS_COST hook_int_rtx_0
376 #undef TARGET_MACHINE_DEPENDENT_REORG
377 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
379 #undef TARGET_ENCODE_SECTION_INFO
380 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
382 /* ??? ABI doesn't allow us to define this. */
383 #if 0
384 #undef TARGET_PROMOTE_FUNCTION_ARGS
385 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
386 #endif
388 /* ??? ABI doesn't allow us to define this. */
389 #if 0
390 #undef TARGET_PROMOTE_FUNCTION_RETURN
391 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
392 #endif
394 /* ??? Investigate. */
395 #if 0
396 #undef TARGET_PROMOTE_PROTOTYPES
397 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
398 #endif
400 #undef TARGET_STRUCT_VALUE_RTX
401 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
402 #undef TARGET_RETURN_IN_MEMORY
403 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
405 #undef TARGET_SETUP_INCOMING_VARARGS
406 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
407 #undef TARGET_STRICT_ARGUMENT_NAMING
408 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
410 struct gcc_target targetm = TARGET_INITIALIZER;
412 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
415 call_operand (rtx op, enum machine_mode mode)
417 if (mode != GET_MODE (op) && mode != VOIDmode)
418 return 0;
420 return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG
421 || (GET_CODE (op) == SUBREG && GET_CODE (XEXP (op, 0)) == REG));
424 /* Return 1 if OP refers to a symbol in the sdata section. */
427 sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
429 switch (GET_CODE (op))
431 case CONST:
432 if (GET_CODE (XEXP (op, 0)) != PLUS
433 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
434 break;
435 op = XEXP (XEXP (op, 0), 0);
436 /* FALLTHRU */
438 case SYMBOL_REF:
439 if (CONSTANT_POOL_ADDRESS_P (op))
440 return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
441 else
442 return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);
444 default:
445 break;
448 return 0;
452 small_addr_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
454 return SYMBOL_REF_SMALL_ADDR_P (op);
457 /* Return 1 if OP refers to a symbol, and is appropriate for a GOT load. */
460 got_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
462 switch (GET_CODE (op))
464 case CONST:
465 op = XEXP (op, 0);
466 if (GET_CODE (op) != PLUS)
467 return 0;
468 if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
469 return 0;
470 op = XEXP (op, 1);
471 if (GET_CODE (op) != CONST_INT)
472 return 0;
474 return 1;
476 /* Ok if we're not using GOT entries at all. */
477 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
478 return 1;
480 /* "Ok" while emitting rtl, since otherwise we won't be provided
481 with the entire offset during emission, which makes it very
482 hard to split the offset into high and low parts. */
483 if (rtx_equal_function_value_matters)
484 return 1;
486 /* Force the low 14 bits of the constant to zero so that we do not
487 use up so many GOT entries. */
488 return (INTVAL (op) & 0x3fff) == 0;
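/* Worked example of the test above: an offset of 0x8000 (low 14 bits clear)
   is accepted, while sym+0x123 is rejected, so its offset ends up split into
   a high part plus a small add (see ia64_expand_load_address below) and
   sym+0x123 and sym+0x456 can share a single GOT entry for "sym".  */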
490 case SYMBOL_REF:
491 if (SYMBOL_REF_SMALL_ADDR_P (op))
492 return 0;
493 case LABEL_REF:
494 return 1;
496 default:
497 break;
499 return 0;
502 /* Return 1 if OP refers to a symbol. */
505 symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
507 switch (GET_CODE (op))
509 case CONST:
510 case SYMBOL_REF:
511 case LABEL_REF:
512 return 1;
514 default:
515 break;
517 return 0;
520 /* Return tls_model if OP refers to a TLS symbol. */
523 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
525 if (GET_CODE (op) != SYMBOL_REF)
526 return 0;
527 return SYMBOL_REF_TLS_MODEL (op);
531 /* Return 1 if OP refers to a function. */
534 function_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
536 if (GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (op))
537 return 1;
538 else
539 return 0;
542 /* Return 1 if OP is setjmp or a similar function. */
544 /* ??? This is an unsatisfying solution. Should rethink. */
547 setjmp_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
549 const char *name;
550 int retval = 0;
552 if (GET_CODE (op) != SYMBOL_REF)
553 return 0;
555 name = XSTR (op, 0);
557 /* The following code is borrowed from special_function_p in calls.c. */
559 /* Disregard prefix _, __ or __x. */
560 if (name[0] == '_')
562 if (name[1] == '_' && name[2] == 'x')
563 name += 3;
564 else if (name[1] == '_')
565 name += 2;
566 else
567 name += 1;
570 if (name[0] == 's')
572 retval
573 = ((name[1] == 'e'
574 && (! strcmp (name, "setjmp")
575 || ! strcmp (name, "setjmp_syscall")))
576 || (name[1] == 'i'
577 && ! strcmp (name, "sigsetjmp"))
578 || (name[1] == 'a'
579 && ! strcmp (name, "savectx")));
581 else if ((name[0] == 'q' && name[1] == 's'
582 && ! strcmp (name, "qsetjmp"))
583 || (name[0] == 'v' && name[1] == 'f'
584 && ! strcmp (name, "vfork")))
585 retval = 1;
587 return retval;
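/* Examples of the prefix stripping above: "_setjmp" and "__setjmp" both
   reduce to "setjmp", "__sigsetjmp" reduces to "sigsetjmp", and "vfork" is
   matched directly, so all of them are treated as setjmp-like calls.  */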
590 /* Return 1 if OP is a general operand, excluding tls symbolic operands. */
593 move_operand (rtx op, enum machine_mode mode)
595 return general_operand (op, mode) && !tls_symbolic_operand (op, mode);
598 /* Return 1 if OP is a register operand that is (or could be) a GR reg. */
601 gr_register_operand (rtx op, enum machine_mode mode)
603 if (! register_operand (op, mode))
604 return 0;
605 if (GET_CODE (op) == SUBREG)
606 op = SUBREG_REG (op);
607 if (GET_CODE (op) == REG)
609 unsigned int regno = REGNO (op);
610 if (regno < FIRST_PSEUDO_REGISTER)
611 return GENERAL_REGNO_P (regno);
613 return 1;
616 /* Return 1 if OP is a register operand that is (or could be) an FR reg. */
619 fr_register_operand (rtx op, enum machine_mode mode)
621 if (! register_operand (op, mode))
622 return 0;
623 if (GET_CODE (op) == SUBREG)
624 op = SUBREG_REG (op);
625 if (GET_CODE (op) == REG)
627 unsigned int regno = REGNO (op);
628 if (regno < FIRST_PSEUDO_REGISTER)
629 return FR_REGNO_P (regno);
631 return 1;
634 /* Return 1 if OP is a register operand that is (or could be) a GR/FR reg. */
637 grfr_register_operand (rtx op, enum machine_mode mode)
639 if (! register_operand (op, mode))
640 return 0;
641 if (GET_CODE (op) == SUBREG)
642 op = SUBREG_REG (op);
643 if (GET_CODE (op) == REG)
645 unsigned int regno = REGNO (op);
646 if (regno < FIRST_PSEUDO_REGISTER)
647 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
649 return 1;
652 /* Return 1 if OP is a nonimmediate operand that is (or could be) a GR reg. */
655 gr_nonimmediate_operand (rtx op, enum machine_mode mode)
657 if (! nonimmediate_operand (op, mode))
658 return 0;
659 if (GET_CODE (op) == SUBREG)
660 op = SUBREG_REG (op);
661 if (GET_CODE (op) == REG)
663 unsigned int regno = REGNO (op);
664 if (regno < FIRST_PSEUDO_REGISTER)
665 return GENERAL_REGNO_P (regno);
667 return 1;
670 /* Return 1 if OP is a nonimmediate operand that is (or could be) a FR reg. */
673 fr_nonimmediate_operand (rtx op, enum machine_mode mode)
675 if (! nonimmediate_operand (op, mode))
676 return 0;
677 if (GET_CODE (op) == SUBREG)
678 op = SUBREG_REG (op);
679 if (GET_CODE (op) == REG)
681 unsigned int regno = REGNO (op);
682 if (regno < FIRST_PSEUDO_REGISTER)
683 return FR_REGNO_P (regno);
685 return 1;
688 /* Return 1 if OP is a nonimmediate operand that is a GR/FR reg. */
691 grfr_nonimmediate_operand (rtx op, enum machine_mode mode)
693 if (! nonimmediate_operand (op, mode))
694 return 0;
695 if (GET_CODE (op) == SUBREG)
696 op = SUBREG_REG (op);
697 if (GET_CODE (op) == REG)
699 unsigned int regno = REGNO (op);
700 if (regno < FIRST_PSEUDO_REGISTER)
701 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
703 return 1;
706 /* Return 1 if OP is a GR register operand, or zero. */
709 gr_reg_or_0_operand (rtx op, enum machine_mode mode)
711 return (op == const0_rtx || gr_register_operand (op, mode));
714 /* Return 1 if OP is a GR register operand, or a 5 bit immediate operand. */
717 gr_reg_or_5bit_operand (rtx op, enum machine_mode mode)
719 return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 32)
720 || gr_register_operand (op, mode));
723 /* Return 1 if OP is a GR register operand, or a 6 bit immediate operand. */
726 gr_reg_or_6bit_operand (rtx op, enum machine_mode mode)
728 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
729 || gr_register_operand (op, mode));
732 /* Return 1 if OP is a GR register operand, or an 8 bit immediate operand. */
735 gr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
737 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
738 || gr_register_operand (op, mode));
741 /* Return 1 if OP is a GR/FR register operand, or an 8 bit immediate. */
744 grfr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
746 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
747 || grfr_register_operand (op, mode));
750 /* Return 1 if OP is a register operand, or an 8 bit adjusted immediate
751 operand. */
754 gr_reg_or_8bit_adjusted_operand (rtx op, enum machine_mode mode)
756 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_L (INTVAL (op)))
757 || gr_register_operand (op, mode));
760 /* Return 1 if OP is a register operand, or is valid for both an 8 bit
761 immediate and an 8 bit adjusted immediate operand. This is necessary
762 because when we emit a compare, we don't know what the condition will be,
 763 so we need an immediate that is acceptable to both the GT and LT forms. */
766 gr_reg_or_8bit_and_adjusted_operand (rtx op, enum machine_mode mode)
768 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))
769 && CONST_OK_FOR_L (INTVAL (op)))
770 || gr_register_operand (op, mode));
773 /* Return 1 if OP is a register operand, or a 14 bit immediate operand. */
776 gr_reg_or_14bit_operand (rtx op, enum machine_mode mode)
778 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_I (INTVAL (op)))
779 || gr_register_operand (op, mode));
782 /* Return 1 if OP is a register operand, or a 22 bit immediate operand. */
785 gr_reg_or_22bit_operand (rtx op, enum machine_mode mode)
787 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
788 || gr_register_operand (op, mode));
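/* Reader's key to the CONST_OK_FOR_* letters used above (defined in ia64.h;
   the ranges given here are the usual IA-64 ones): I = 14-bit signed,
   J = 22-bit signed, K = 8-bit signed, L = 8-bit "adjusted", M = 0..63.
   So, e.g., gr_reg_or_14bit_operand accepts a GR register or -8192..8191.  */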
791 /* Return 1 if OP is a 6 bit immediate operand. */
794 shift_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
796 return (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)));
799 /* Return 1 if OP is a 5 bit immediate operand. */
802 shift_32bit_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
804 return (GET_CODE (op) == CONST_INT
805 && (INTVAL (op) >= 0 && INTVAL (op) < 32));
808 /* Return 1 if OP is a 2, 4, 8, or 16 immediate operand. */
811 shladd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
813 return (GET_CODE (op) == CONST_INT
814 && (INTVAL (op) == 2 || INTVAL (op) == 4
815 || INTVAL (op) == 8 || INTVAL (op) == 16));
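/* Illustrative use: these constants are the scaling factors of shladd, so a
   pattern such as (plus (mult (reg r14) (const_int 4)) (reg r15)) can be
   emitted as "shladd r16 = r14, 2, r15", i.e. (r14 << 2) + r15.  */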
818 /* Return 1 if OP is a -16, -8, -4, -1, 1, 4, 8, or 16 immediate operand. */
821 fetchadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
823 return (GET_CODE (op) == CONST_INT
824 && (INTVAL (op) == -16 || INTVAL (op) == -8 ||
825 INTVAL (op) == -4 || INTVAL (op) == -1 ||
826 INTVAL (op) == 1 || INTVAL (op) == 4 ||
827 INTVAL (op) == 8 || INTVAL (op) == 16));
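/* Illustrative use: the IA-64 fetchadd instruction supports only these eight
   increments, e.g. "fetchadd4.acq r8 = [r32], 4"; any other addend has to be
   handled with a compare-and-swap loop instead.  */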
830 /* Return 1 if OP is a floating-point constant zero, one, or a register. */
833 fr_reg_or_fp01_operand (rtx op, enum machine_mode mode)
835 return ((GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (op))
836 || fr_register_operand (op, mode));
839 /* Like nonimmediate_operand, but don't allow MEMs that try to use a
840 POST_MODIFY with a REG as displacement. */
843 destination_operand (rtx op, enum machine_mode mode)
845 if (! nonimmediate_operand (op, mode))
846 return 0;
847 if (GET_CODE (op) == MEM
848 && GET_CODE (XEXP (op, 0)) == POST_MODIFY
849 && GET_CODE (XEXP (XEXP (XEXP (op, 0), 1), 1)) == REG)
850 return 0;
851 return 1;
854 /* Like memory_operand, but don't allow post-increments. */
857 not_postinc_memory_operand (rtx op, enum machine_mode mode)
859 return (memory_operand (op, mode)
860 && GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != RTX_AUTOINC);
 863 /* Return 1 if this is a comparison operator that accepts a normal 8-bit
864 signed immediate operand. */
867 normal_comparison_operator (register rtx op, enum machine_mode mode)
869 enum rtx_code code = GET_CODE (op);
870 return ((mode == VOIDmode || GET_MODE (op) == mode)
871 && (code == EQ || code == NE
872 || code == GT || code == LE || code == GTU || code == LEU));
 875 /* Return 1 if this is a comparison operator that accepts an adjusted 8-bit
876 signed immediate operand. */
879 adjusted_comparison_operator (register rtx op, enum machine_mode mode)
881 enum rtx_code code = GET_CODE (op);
882 return ((mode == VOIDmode || GET_MODE (op) == mode)
883 && (code == LT || code == GE || code == LTU || code == GEU));
886 /* Return 1 if this is a signed inequality operator. */
889 signed_inequality_operator (register rtx op, enum machine_mode mode)
891 enum rtx_code code = GET_CODE (op);
892 return ((mode == VOIDmode || GET_MODE (op) == mode)
893 && (code == GE || code == GT
894 || code == LE || code == LT));
897 /* Return 1 if this operator is valid for predication. */
900 predicate_operator (register rtx op, enum machine_mode mode)
902 enum rtx_code code = GET_CODE (op);
903 return ((GET_MODE (op) == mode || mode == VOIDmode)
904 && (code == EQ || code == NE));
907 /* Return 1 if this operator can be used in a conditional operation. */
910 condop_operator (register rtx op, enum machine_mode mode)
912 enum rtx_code code = GET_CODE (op);
913 return ((GET_MODE (op) == mode || mode == VOIDmode)
914 && (code == PLUS || code == MINUS || code == AND
915 || code == IOR || code == XOR));
918 /* Return 1 if this is the ar.lc register. */
921 ar_lc_reg_operand (register rtx op, enum machine_mode mode)
923 return (GET_MODE (op) == DImode
924 && (mode == DImode || mode == VOIDmode)
925 && GET_CODE (op) == REG
926 && REGNO (op) == AR_LC_REGNUM);
929 /* Return 1 if this is the ar.ccv register. */
932 ar_ccv_reg_operand (register rtx op, enum machine_mode mode)
934 return ((GET_MODE (op) == mode || mode == VOIDmode)
935 && GET_CODE (op) == REG
936 && REGNO (op) == AR_CCV_REGNUM);
939 /* Return 1 if this is the ar.pfs register. */
942 ar_pfs_reg_operand (register rtx op, enum machine_mode mode)
944 return ((GET_MODE (op) == mode || mode == VOIDmode)
945 && GET_CODE (op) == REG
946 && REGNO (op) == AR_PFS_REGNUM);
949 /* Like general_operand, but don't allow (mem (addressof)). */
952 general_xfmode_operand (rtx op, enum machine_mode mode)
954 if (! general_operand (op, mode))
955 return 0;
956 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
957 return 0;
958 return 1;
961 /* Similarly. */
964 destination_xfmode_operand (rtx op, enum machine_mode mode)
966 if (! destination_operand (op, mode))
967 return 0;
968 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
969 return 0;
970 return 1;
973 /* Similarly. */
976 xfreg_or_fp01_operand (rtx op, enum machine_mode mode)
978 if (GET_CODE (op) == SUBREG)
979 return 0;
980 return fr_reg_or_fp01_operand (op, mode);
983 /* Return 1 if OP is valid as a base register in a reg + offset address. */
986 basereg_operand (rtx op, enum machine_mode mode)
988 /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
989 checks from pa.c basereg_operand as well? Seems to be OK without them
990 in test runs. */
992 return (register_operand (op, mode) &&
993 REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
996 typedef enum
998 ADDR_AREA_NORMAL, /* normal address area */
999 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
1001 ia64_addr_area;
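/* Reader's note: "addl" carries a 22-bit signed immediate, so with r0 as the
   base it can form any address in roughly (-2MB, +2MB); that is what makes
   objects in the small area reachable without a full 64-bit "movl".  */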
1003 static GTY(()) tree small_ident1;
1004 static GTY(()) tree small_ident2;
1006 static void
1007 init_idents (void)
1009 if (small_ident1 == 0)
1011 small_ident1 = get_identifier ("small");
1012 small_ident2 = get_identifier ("__small__");
1016 /* Retrieve the address area that has been chosen for the given decl. */
1018 static ia64_addr_area
1019 ia64_get_addr_area (tree decl)
1021 tree model_attr;
1023 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
1024 if (model_attr)
1026 tree id;
1028 init_idents ();
1029 id = TREE_VALUE (TREE_VALUE (model_attr));
1030 if (id == small_ident1 || id == small_ident2)
1031 return ADDR_AREA_SMALL;
1033 return ADDR_AREA_NORMAL;
1036 static tree
1037 ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1039 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
1040 ia64_addr_area area;
1041 tree arg, decl = *node;
1043 init_idents ();
1044 arg = TREE_VALUE (args);
1045 if (arg == small_ident1 || arg == small_ident2)
1047 addr_area = ADDR_AREA_SMALL;
1049 else
1051 warning ("invalid argument of `%s' attribute",
1052 IDENTIFIER_POINTER (name));
1053 *no_add_attrs = true;
1056 switch (TREE_CODE (decl))
1058 case VAR_DECL:
1059 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
1060 == FUNCTION_DECL)
1061 && !TREE_STATIC (decl))
1063 error ("%Jan address area attribute cannot be specified for "
1064 "local variables", decl, decl);
1065 *no_add_attrs = true;
1067 area = ia64_get_addr_area (decl);
1068 if (area != ADDR_AREA_NORMAL && addr_area != area)
1070 error ("%Jaddress area of '%s' conflicts with previous "
1071 "declaration", decl, decl);
1072 *no_add_attrs = true;
1074 break;
1076 case FUNCTION_DECL:
1077 error ("%Jaddress area attribute cannot be specified for functions",
1078 decl, decl);
1079 *no_add_attrs = true;
1080 break;
1082 default:
1083 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1084 *no_add_attrs = true;
1085 break;
1088 return NULL_TREE;
1091 static void
1092 ia64_encode_addr_area (tree decl, rtx symbol)
1094 int flags;
1096 flags = SYMBOL_REF_FLAGS (symbol);
1097 switch (ia64_get_addr_area (decl))
1099 case ADDR_AREA_NORMAL: break;
1100 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
1101 default: abort ();
1103 SYMBOL_REF_FLAGS (symbol) = flags;
1106 static void
1107 ia64_encode_section_info (tree decl, rtx rtl, int first)
1109 default_encode_section_info (decl, rtl, first);
1111 /* Careful not to prod global register variables. */
1112 if (TREE_CODE (decl) == VAR_DECL
1113 && GET_CODE (DECL_RTL (decl)) == MEM
1114 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
1115 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
1116 ia64_encode_addr_area (decl, XEXP (rtl, 0));
1119 /* Return 1 if the operands of a move are ok. */
1122 ia64_move_ok (rtx dst, rtx src)
1124 /* If we're under init_recog_no_volatile, we'll not be able to use
1125 memory_operand. So check the code directly and don't worry about
1126 the validity of the underlying address, which should have been
1127 checked elsewhere anyway. */
1128 if (GET_CODE (dst) != MEM)
1129 return 1;
1130 if (GET_CODE (src) == MEM)
1131 return 0;
1132 if (register_operand (src, VOIDmode))
1133 return 1;
 1135 /* Otherwise, this must be a constant, and it must be 0, 0.0, or 1.0. */
1136 if (INTEGRAL_MODE_P (GET_MODE (dst)))
1137 return src == const0_rtx;
1138 else
1139 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
1143 addp4_optimize_ok (rtx op1, rtx op2)
1145 return (basereg_operand (op1, GET_MODE(op1)) !=
1146 basereg_operand (op2, GET_MODE(op2)));
1149 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
1150 Return the length of the field, or <= 0 on failure. */
1153 ia64_depz_field_mask (rtx rop, rtx rshift)
1155 unsigned HOST_WIDE_INT op = INTVAL (rop);
1156 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
1158 /* Get rid of the zero bits we're shifting in. */
1159 op >>= shift;
1161 /* We must now have a solid block of 1's at bit 0. */
1162 return exact_log2 (op + 1);
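/* Worked example: for a mask 0xff0 with shift 4, op >>= 4 yields 0xff and
   exact_log2 (0xff + 1) = 8, so the dep.z field is 8 bits wide.  A mask such
   as 0xf0f fails: 0xf0f >> 4 = 0xf0, and 0xf1 is not a power of two, so
   exact_log2 returns -1.  */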
1165 /* Expand a symbolic constant load. */
1167 void
1168 ia64_expand_load_address (rtx dest, rtx src)
1170 if (tls_symbolic_operand (src, VOIDmode))
1171 abort ();
1172 if (GET_CODE (dest) != REG)
1173 abort ();
1175 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1176 having to pointer-extend the value afterward. Other forms of address
1177 computation below are also more natural to compute as 64-bit quantities.
1178 If we've been given an SImode destination register, change it. */
1179 if (GET_MODE (dest) != Pmode)
1180 dest = gen_rtx_REG (Pmode, REGNO (dest));
1182 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
1184 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
1185 return;
1187 else if (TARGET_AUTO_PIC)
1189 emit_insn (gen_load_gprel64 (dest, src));
1190 return;
1192 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1194 emit_insn (gen_load_fptr (dest, src));
1195 return;
1197 else if (sdata_symbolic_operand (src, VOIDmode))
1199 emit_insn (gen_load_gprel (dest, src));
1200 return;
1203 if (GET_CODE (src) == CONST
1204 && GET_CODE (XEXP (src, 0)) == PLUS
1205 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
1206 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0)
1208 rtx sym = XEXP (XEXP (src, 0), 0);
1209 HOST_WIDE_INT ofs, hi, lo;
1211 /* Split the offset into a sign extended 14-bit low part
1212 and a complementary high part. */
1213 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
1214 lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
1215 hi = ofs - lo;
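/* Arithmetic example: for ofs = 0x12345, lo = ((0x2345 ^ 0x2000) - 0x2000)
   = -0x1cbb and hi = 0x14000, so hi + lo == ofs, hi has its low 14 bits
   clear, and lo fits the signed 14-bit immediate used by the gen_adddi3
   below.  */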
1217 ia64_expand_load_address (dest, plus_constant (sym, hi));
1218 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
1220 else
1222 rtx tmp;
1224 tmp = gen_rtx_HIGH (Pmode, src);
1225 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1226 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1228 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
1229 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1233 static GTY(()) rtx gen_tls_tga;
1234 static rtx
1235 gen_tls_get_addr (void)
1237 if (!gen_tls_tga)
1238 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1239 return gen_tls_tga;
1242 static GTY(()) rtx thread_pointer_rtx;
1243 static rtx
1244 gen_thread_pointer (void)
1246 if (!thread_pointer_rtx)
1248 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1249 RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
1251 return thread_pointer_rtx;
1254 static rtx
1255 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
1257 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1258 rtx orig_op0 = op0;
1260 switch (tls_kind)
1262 case TLS_MODEL_GLOBAL_DYNAMIC:
1263 start_sequence ();
1265 tga_op1 = gen_reg_rtx (Pmode);
1266 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1267 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1268 RTX_UNCHANGING_P (tga_op1) = 1;
1270 tga_op2 = gen_reg_rtx (Pmode);
1271 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
1272 tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
1273 RTX_UNCHANGING_P (tga_op2) = 1;
1275 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1276 LCT_CONST, Pmode, 2, tga_op1,
1277 Pmode, tga_op2, Pmode);
1279 insns = get_insns ();
1280 end_sequence ();
1282 if (GET_MODE (op0) != Pmode)
1283 op0 = tga_ret;
1284 emit_libcall_block (insns, op0, tga_ret, op1);
1285 break;
1287 case TLS_MODEL_LOCAL_DYNAMIC:
 1288 /* ??? This isn't the completely proper way to do local-dynamic.
1289 If the call to __tls_get_addr is used only by a single symbol,
1290 then we should (somehow) move the dtprel to the second arg
1291 to avoid the extra add. */
1292 start_sequence ();
1294 tga_op1 = gen_reg_rtx (Pmode);
1295 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1296 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1297 RTX_UNCHANGING_P (tga_op1) = 1;
1299 tga_op2 = const0_rtx;
1301 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1302 LCT_CONST, Pmode, 2, tga_op1,
1303 Pmode, tga_op2, Pmode);
1305 insns = get_insns ();
1306 end_sequence ();
1308 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1309 UNSPEC_LD_BASE);
1310 tmp = gen_reg_rtx (Pmode);
1311 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1313 if (!register_operand (op0, Pmode))
1314 op0 = gen_reg_rtx (Pmode);
1315 if (TARGET_TLS64)
1317 emit_insn (gen_load_dtprel (op0, op1));
1318 emit_insn (gen_adddi3 (op0, tmp, op0));
1320 else
1321 emit_insn (gen_add_dtprel (op0, tmp, op1));
1322 break;
1324 case TLS_MODEL_INITIAL_EXEC:
1325 tmp = gen_reg_rtx (Pmode);
1326 emit_insn (gen_load_ltoff_tprel (tmp, op1));
1327 tmp = gen_rtx_MEM (Pmode, tmp);
1328 RTX_UNCHANGING_P (tmp) = 1;
1329 tmp = force_reg (Pmode, tmp);
1331 if (!register_operand (op0, Pmode))
1332 op0 = gen_reg_rtx (Pmode);
1333 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1334 break;
1336 case TLS_MODEL_LOCAL_EXEC:
1337 if (!register_operand (op0, Pmode))
1338 op0 = gen_reg_rtx (Pmode);
1339 if (TARGET_TLS64)
1341 emit_insn (gen_load_tprel (op0, op1));
1342 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
1344 else
1345 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
1346 break;
1348 default:
1349 abort ();
1352 if (orig_op0 == op0)
1353 return NULL_RTX;
1354 if (GET_MODE (orig_op0) == Pmode)
1355 return op0;
1356 return gen_lowpart (GET_MODE (orig_op0), op0);
1360 ia64_expand_move (rtx op0, rtx op1)
1362 enum machine_mode mode = GET_MODE (op0);
1364 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1365 op1 = force_reg (mode, op1);
1367 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1369 enum tls_model tls_kind;
1370 if ((tls_kind = tls_symbolic_operand (op1, VOIDmode)))
1371 return ia64_expand_tls_address (tls_kind, op0, op1);
1373 if (!TARGET_NO_PIC && reload_completed)
1375 ia64_expand_load_address (op0, op1);
1376 return NULL_RTX;
1380 return op1;
1383 /* Split a move from OP1 to OP0 conditional on COND. */
1385 void
1386 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1388 rtx insn, first = get_last_insn ();
1390 emit_move_insn (op0, op1);
1392 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1393 if (INSN_P (insn))
1394 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1395 PATTERN (insn));
1398 /* Split a post-reload TImode or TFmode reference into two DImode
1399 components. This is made extra difficult by the fact that we do
1400 not get any scratch registers to work with, because reload cannot
1401 be prevented from giving us a scratch that overlaps the register
1402 pair involved. So instead, when addressing memory, we tweak the
1403 pointer register up and back down with POST_INCs. Or up and not
1404 back down when we can get away with it.
1406 REVERSED is true when the loads must be done in reversed order
1407 (high word first) for correctness. DEAD is true when the pointer
1408 dies with the second insn we generate and therefore the second
1409 address must not carry a postmodify.
1411 May return an insn which is to be emitted after the moves. */
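/* Illustrative sketch, assuming a simple (mem (reg r14)) source whose pointer
   is not dead: the split below yields roughly
       ld8 rlo = [r14], 8        // low word, r14 += 8
       ld8 rhi = [r14], -8       // high word, r14 restored
   and when the pointer is dead the second post-decrement is simply omitted.  */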
1413 static rtx
1414 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1416 rtx fixup = 0;
1418 switch (GET_CODE (in))
1420 case REG:
1421 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1422 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1423 break;
1425 case CONST_INT:
1426 case CONST_DOUBLE:
1427 /* Cannot occur reversed. */
1428 if (reversed) abort ();
1430 if (GET_MODE (in) != TFmode)
1431 split_double (in, &out[0], &out[1]);
1432 else
1433 /* split_double does not understand how to split a TFmode
1434 quantity into a pair of DImode constants. */
1436 REAL_VALUE_TYPE r;
1437 unsigned HOST_WIDE_INT p[2];
1438 long l[4]; /* TFmode is 128 bits */
1440 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1441 real_to_target (l, &r, TFmode);
1443 if (FLOAT_WORDS_BIG_ENDIAN)
1445 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1446 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1448 else
1450 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1451 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1453 out[0] = GEN_INT (p[0]);
1454 out[1] = GEN_INT (p[1]);
1456 break;
1458 case MEM:
1460 rtx base = XEXP (in, 0);
1461 rtx offset;
1463 switch (GET_CODE (base))
1465 case REG:
1466 if (!reversed)
1468 out[0] = adjust_automodify_address
1469 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1470 out[1] = adjust_automodify_address
1471 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1473 else
1475 /* Reversal requires a pre-increment, which can only
1476 be done as a separate insn. */
1477 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1478 out[0] = adjust_automodify_address
1479 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1480 out[1] = adjust_address (in, DImode, 0);
1482 break;
1484 case POST_INC:
1485 if (reversed || dead) abort ();
1486 /* Just do the increment in two steps. */
1487 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1488 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1489 break;
1491 case POST_DEC:
1492 if (reversed || dead) abort ();
1493 /* Add 8, subtract 24. */
1494 base = XEXP (base, 0);
1495 out[0] = adjust_automodify_address
1496 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1497 out[1] = adjust_automodify_address
1498 (in, DImode,
1499 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1501 break;
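/* Arithmetic check for the case above: the original POST_DEC would have left
   BASE 16 lower.  The first half-access post-increments by 8, so the second
   must post-modify by -24 to end at BASE - 16, matching "Add 8, subtract 24".  */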
1503 case POST_MODIFY:
1504 if (reversed || dead) abort ();
1505 /* Extract and adjust the modification. This case is
1506 trickier than the others, because we might have an
1507 index register, or we might have a combined offset that
1508 doesn't fit a signed 9-bit displacement field. We can
1509 assume the incoming expression is already legitimate. */
1510 offset = XEXP (base, 1);
1511 base = XEXP (base, 0);
1513 out[0] = adjust_automodify_address
1514 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1516 if (GET_CODE (XEXP (offset, 1)) == REG)
1518 /* Can't adjust the postmodify to match. Emit the
1519 original, then a separate addition insn. */
1520 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1521 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1523 else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
1524 abort ();
1525 else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1527 /* Again the postmodify cannot be made to match, but
1528 in this case it's more efficient to get rid of the
1529 postmodify entirely and fix up with an add insn. */
1530 out[1] = adjust_automodify_address (in, DImode, base, 8);
1531 fixup = gen_adddi3 (base, base,
1532 GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1534 else
1536 /* Combined offset still fits in the displacement field.
1537 (We cannot overflow it at the high end.) */
1538 out[1] = adjust_automodify_address
1539 (in, DImode,
1540 gen_rtx_POST_MODIFY (Pmode, base,
1541 gen_rtx_PLUS (Pmode, base,
1542 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1545 break;
1547 default:
1548 abort ();
1550 break;
1553 default:
1554 abort ();
1557 return fixup;
1560 /* Split a TImode or TFmode move instruction after reload.
1561 This is used by *movtf_internal and *movti_internal. */
1562 void
1563 ia64_split_tmode_move (rtx operands[])
1565 rtx in[2], out[2], insn;
1566 rtx fixup[2];
1567 bool dead = false;
1568 bool reversed = false;
1570 /* It is possible for reload to decide to overwrite a pointer with
1571 the value it points to. In that case we have to do the loads in
1572 the appropriate order so that the pointer is not destroyed too
1573 early. Also we must not generate a postmodify for that second
1574 load, or rws_access_regno will abort. */
1575 if (GET_CODE (operands[1]) == MEM
1576 && reg_overlap_mentioned_p (operands[0], operands[1]))
1578 rtx base = XEXP (operands[1], 0);
1579 while (GET_CODE (base) != REG)
1580 base = XEXP (base, 0);
1582 if (REGNO (base) == REGNO (operands[0]))
1583 reversed = true;
1584 dead = true;
1586 /* Another reason to do the moves in reversed order is if the first
1587 element of the target register pair is also the second element of
1588 the source register pair. */
1589 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1590 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1591 reversed = true;
1593 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1594 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1596 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1597 if (GET_CODE (EXP) == MEM \
1598 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1599 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1600 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1601 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1602 XEXP (XEXP (EXP, 0), 0), \
1603 REG_NOTES (INSN))
1605 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1606 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1607 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1609 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1610 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1611 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1613 if (fixup[0])
1614 emit_insn (fixup[0]);
1615 if (fixup[1])
1616 emit_insn (fixup[1]);
1618 #undef MAYBE_ADD_REG_INC_NOTE
1621 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1622 through memory plus an extra GR scratch register. Except that you can
1623 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1624 SECONDARY_RELOAD_CLASS, but not both.
1626 We got into problems in the first place by allowing a construct like
1627 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1628 This solution attempts to prevent this situation from occurring. When
1629 we see something like the above, we spill the inner register to memory. */
1632 spill_xfmode_operand (rtx in, int force)
1634 if (GET_CODE (in) == SUBREG
1635 && GET_MODE (SUBREG_REG (in)) == TImode
1636 && GET_CODE (SUBREG_REG (in)) == REG)
1638 rtx mem = gen_mem_addressof (SUBREG_REG (in), NULL_TREE, /*rescan=*/true);
1639 return gen_rtx_MEM (XFmode, copy_to_reg (XEXP (mem, 0)));
1641 else if (force && GET_CODE (in) == REG)
1643 rtx mem = gen_mem_addressof (in, NULL_TREE, /*rescan=*/true);
1644 return gen_rtx_MEM (XFmode, copy_to_reg (XEXP (mem, 0)));
1646 else if (GET_CODE (in) == MEM
1647 && GET_CODE (XEXP (in, 0)) == ADDRESSOF)
1648 return change_address (in, XFmode, copy_to_reg (XEXP (in, 0)));
1649 else
1650 return in;
1653 /* Emit comparison instruction if necessary, returning the expression
1654 that holds the compare result in the proper mode. */
1656 static GTY(()) rtx cmptf_libfunc;
1659 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1661 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1662 rtx cmp;
1664 /* If we have a BImode input, then we already have a compare result, and
1665 do not need to emit another comparison. */
1666 if (GET_MODE (op0) == BImode)
1668 if ((code == NE || code == EQ) && op1 == const0_rtx)
1669 cmp = op0;
1670 else
1671 abort ();
1673 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
 1674 magic number as its third argument, which indicates what to do.
1675 The return value is an integer to be compared against zero. */
1676 else if (GET_MODE (op0) == TFmode)
1678 enum qfcmp_magic {
1679 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1680 QCMP_UNORD = 2,
1681 QCMP_EQ = 4,
1682 QCMP_LT = 8,
1683 QCMP_GT = 16
1684 } magic;
1685 enum rtx_code ncode;
1686 rtx ret, insns;
1687 if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
1688 abort ();
1689 switch (code)
1691 /* 1 = equal, 0 = not equal. Equality operators do
1692 not raise FP_INVALID when given an SNaN operand. */
1693 case EQ: magic = QCMP_EQ; ncode = NE; break;
1694 case NE: magic = QCMP_EQ; ncode = EQ; break;
1695 /* isunordered() from C99. */
1696 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1697 /* Relational operators raise FP_INVALID when given
1698 an SNaN operand. */
1699 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1700 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1701 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1702 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1703 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
 1704 Expanders for buneq etc. would have to be added to ia64.md
1705 for this to be useful. */
1706 default: abort ();
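/* Concrete example of the magic numbers: a LE comparison calls
   _U_Qfcmp (op0, op1, QCMP_LT|QCMP_EQ|QCMP_INV), i.e. third argument 13,
   and the returned integer is then tested with ncode (NE) against zero.  */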
1709 start_sequence ();
1711 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1712 op0, TFmode, op1, TFmode,
1713 GEN_INT (magic), DImode);
1714 cmp = gen_reg_rtx (BImode);
1715 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1716 gen_rtx_fmt_ee (ncode, BImode,
1717 ret, const0_rtx)));
1719 insns = get_insns ();
1720 end_sequence ();
1722 emit_libcall_block (insns, cmp, cmp,
1723 gen_rtx_fmt_ee (code, BImode, op0, op1));
1724 code = NE;
1726 else
1728 cmp = gen_reg_rtx (BImode);
1729 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1730 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1731 code = NE;
1734 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1737 /* Emit the appropriate sequence for a call. */
1739 void
1740 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1741 int sibcall_p)
1743 rtx insn, b0;
1745 addr = XEXP (addr, 0);
1746 addr = convert_memory_address (DImode, addr);
1747 b0 = gen_rtx_REG (DImode, R_BR (0));
1749 /* ??? Should do this for functions known to bind local too. */
1750 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1752 if (sibcall_p)
1753 insn = gen_sibcall_nogp (addr);
1754 else if (! retval)
1755 insn = gen_call_nogp (addr, b0);
1756 else
1757 insn = gen_call_value_nogp (retval, addr, b0);
1758 insn = emit_call_insn (insn);
1760 else
1762 if (sibcall_p)
1763 insn = gen_sibcall_gp (addr);
1764 else if (! retval)
1765 insn = gen_call_gp (addr, b0);
1766 else
1767 insn = gen_call_value_gp (retval, addr, b0);
1768 insn = emit_call_insn (insn);
1770 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1773 if (sibcall_p)
1774 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1777 void
1778 ia64_reload_gp (void)
1780 rtx tmp;
1782 if (current_frame_info.reg_save_gp)
1783 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1784 else
1786 HOST_WIDE_INT offset;
1788 offset = (current_frame_info.spill_cfa_off
1789 + current_frame_info.spill_size);
1790 if (frame_pointer_needed)
1792 tmp = hard_frame_pointer_rtx;
1793 offset = -offset;
1795 else
1797 tmp = stack_pointer_rtx;
1798 offset = current_frame_info.total_size - offset;
1801 if (CONST_OK_FOR_I (offset))
1802 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1803 tmp, GEN_INT (offset)));
1804 else
1806 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1807 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1808 pic_offset_table_rtx, tmp));
1811 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1814 emit_move_insn (pic_offset_table_rtx, tmp);
1817 void
1818 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1819 rtx scratch_b, int noreturn_p, int sibcall_p)
1821 rtx insn;
1822 bool is_desc = false;
1824 /* If we find we're calling through a register, then we're actually
1825 calling through a descriptor, so load up the values. */
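/* (An IA-64 function descriptor is a pair of 64-bit words: word 0 holds the
   code entry point and word 1 the callee's gp.  The POST_INC/POST_DEC below
   step ADDR across exactly those two words.)  */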
1826 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1828 rtx tmp;
1829 bool addr_dead_p;
1831 /* ??? We are currently constrained to *not* use peep2, because
1832 we can legitimately change the global lifetime of the GP
1833 (in the form of killing where previously live). This is
1834 because a call through a descriptor doesn't use the previous
1835 value of the GP, while a direct call does, and we do not
1836 commit to either form until the split here.
1838 That said, this means that we lack precise life info for
1839 whether ADDR is dead after this call. This is not terribly
1840 important, since we can fix things up essentially for free
1841 with the POST_DEC below, but it's nice to not use it when we
1842 can immediately tell it's not necessary. */
1843 addr_dead_p = ((noreturn_p || sibcall_p
1844 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1845 REGNO (addr)))
1846 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1848 /* Load the code address into scratch_b. */
1849 tmp = gen_rtx_POST_INC (Pmode, addr);
1850 tmp = gen_rtx_MEM (Pmode, tmp);
1851 emit_move_insn (scratch_r, tmp);
1852 emit_move_insn (scratch_b, scratch_r);
1854 /* Load the GP address. If ADDR is not dead here, then we must
1855 revert the change made above via the POST_INCREMENT. */
1856 if (!addr_dead_p)
1857 tmp = gen_rtx_POST_DEC (Pmode, addr);
1858 else
1859 tmp = addr;
1860 tmp = gen_rtx_MEM (Pmode, tmp);
1861 emit_move_insn (pic_offset_table_rtx, tmp);
1863 is_desc = true;
1864 addr = scratch_b;
1867 if (sibcall_p)
1868 insn = gen_sibcall_nogp (addr);
1869 else if (retval)
1870 insn = gen_call_value_nogp (retval, addr, retaddr);
1871 else
1872 insn = gen_call_nogp (addr, retaddr);
1873 emit_call_insn (insn);
1875 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1876 ia64_reload_gp ();
1879 /* Begin the assembly file. */
1881 static void
1882 ia64_file_start (void)
1884 default_file_start ();
1885 emit_safe_across_calls ();
1888 void
1889 emit_safe_across_calls (void)
1891 unsigned int rs, re;
1892 int out_state;
1894 rs = 1;
1895 out_state = 0;
1896 while (1)
1898 while (rs < 64 && call_used_regs[PR_REG (rs)])
1899 rs++;
1900 if (rs >= 64)
1901 break;
1902 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1903 continue;
1904 if (out_state == 0)
1906 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1907 out_state = 1;
1909 else
1910 fputc (',', asm_out_file);
1911 if (re == rs + 1)
1912 fprintf (asm_out_file, "p%u", rs);
1913 else
1914 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1915 rs = re + 1;
1917 if (out_state)
1918 fputc ('\n', asm_out_file);
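/* Example (illustrative, assuming the standard IA-64 software conventions in
   which p1-p5 and p16-p63 are preserved across calls): the loop above would
   typically emit

	.pred.safe_across_calls p1-p5,p16-p63

   telling the assembler which predicate registers it may assume survive the
   calls in this function.  */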
1921 /* Helper function for ia64_compute_frame_size: find an appropriate general
1922 register to spill some special register to. The bits already set in
1923 current_frame_info.gr_used_mask mark GR0 to GR31 registers allocated by this routine.
1924 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1926 static int
1927 find_gr_spill (int try_locals)
1929 int regno;
1931 /* If this is a leaf function, first try an otherwise unused
1932 call-clobbered register. */
1933 if (current_function_is_leaf)
1935 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1936 if (! regs_ever_live[regno]
1937 && call_used_regs[regno]
1938 && ! fixed_regs[regno]
1939 && ! global_regs[regno]
1940 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1942 current_frame_info.gr_used_mask |= 1 << regno;
1943 return regno;
1947 if (try_locals)
1949 regno = current_frame_info.n_local_regs;
1950 /* If there is a frame pointer, then we can't use loc79, because
1951 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1952 reg_name switching code in ia64_expand_prologue. */
1953 if (regno < (80 - frame_pointer_needed))
1955 current_frame_info.n_local_regs = regno + 1;
1956 return LOC_REG (0) + regno;
1960 /* Failed to find a general register to spill to. Must use stack. */
1961 return 0;
1964 /* In order to make for nice schedules, we try to allocate every temporary
1965 to a different register. We must of course stay away from call-saved,
1966 fixed, and global registers. We must also stay away from registers
1967 allocated in current_frame_info.gr_used_mask, since those include regs
1968 used all through the prologue.
1970 Any register allocated here must be used immediately. The idea is to
1971 aid scheduling, not to solve data flow problems. */
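/* Illustrative example (not in the original source): with
   last_scratch_gr_reg == 15, successive calls to next_scratch_gr_reg hand
   out r16, r17, ... wrapping modulo 32, skipping fixed, global and
   already-reserved registers, so back-to-back temporaries land in
   different registers.  */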
1973 static int last_scratch_gr_reg;
1975 static int
1976 next_scratch_gr_reg (void)
1978 int i, regno;
1980 for (i = 0; i < 32; ++i)
1982 regno = (last_scratch_gr_reg + i + 1) & 31;
1983 if (call_used_regs[regno]
1984 && ! fixed_regs[regno]
1985 && ! global_regs[regno]
1986 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1988 last_scratch_gr_reg = regno;
1989 return regno;
1993 /* There must be _something_ available. */
1994 abort ();
1997 /* Helper function for ia64_compute_frame_size, called through
1998 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2000 static void
2001 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2003 unsigned int regno = REGNO (reg);
2004 if (regno < 32)
2006 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2007 for (i = 0; i < n; ++i)
2008 current_frame_info.gr_used_mask |= 1 << (regno + i);
2012 /* Compute the frame layout for the current function and record it in
2013    current_frame_info. SIZE is the number of bytes of space
2014 needed for local variables. */
2016 static void
2017 ia64_compute_frame_size (HOST_WIDE_INT size)
2019 HOST_WIDE_INT total_size;
2020 HOST_WIDE_INT spill_size = 0;
2021 HOST_WIDE_INT extra_spill_size = 0;
2022 HOST_WIDE_INT pretend_args_size;
2023 HARD_REG_SET mask;
2024 int n_spilled = 0;
2025 int spilled_gr_p = 0;
2026 int spilled_fr_p = 0;
2027 unsigned int regno;
2028 int i;
2030 if (current_frame_info.initialized)
2031 return;
2033 memset (&current_frame_info, 0, sizeof current_frame_info);
2034 CLEAR_HARD_REG_SET (mask);
2036 /* Don't allocate scratches to the return register. */
2037 diddle_return_value (mark_reg_gr_used_mask, NULL);
2039 /* Don't allocate scratches to the EH scratch registers. */
2040 if (cfun->machine->ia64_eh_epilogue_sp)
2041 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2042 if (cfun->machine->ia64_eh_epilogue_bsp)
2043 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2045 /* Find the size of the register stack frame. We have only 80 local
2046 registers, because we reserve 8 for the inputs and 8 for the
2047 outputs. */
2049 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2050 since we'll be adjusting that down later. */
2051 regno = LOC_REG (78) + ! frame_pointer_needed;
2052 for (; regno >= LOC_REG (0); regno--)
2053 if (regs_ever_live[regno])
2054 break;
2055 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2057 /* For functions marked with the syscall_linkage attribute, we must mark
2058 all eight input registers as in use, so that locals aren't visible to
2059 the caller. */
2061 if (cfun->machine->n_varargs > 0
2062 || lookup_attribute ("syscall_linkage",
2063 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2064 current_frame_info.n_input_regs = 8;
2065 else
2067 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2068 if (regs_ever_live[regno])
2069 break;
2070 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2073 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2074 if (regs_ever_live[regno])
2075 break;
2076 i = regno - OUT_REG (0) + 1;
2078 /* When -p profiling, we need one output register for the mcount argument.
2079 Likewise for -a profiling for the bb_init_func argument. For -ax
2080 profiling, we need two output registers for the two bb_init_trace_func
2081 arguments. */
2082 if (current_function_profile)
2083 i = MAX (i, 1);
2084 current_frame_info.n_output_regs = i;
2086 /* ??? No rotating register support yet. */
2087 current_frame_info.n_rotate_regs = 0;
2089 /* Discover which registers need spilling, and how much room that
2090 will take. Begin with floating point and general registers,
2091 which will always wind up on the stack. */
2093 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2094 if (regs_ever_live[regno] && ! call_used_regs[regno])
2096 SET_HARD_REG_BIT (mask, regno);
2097 spill_size += 16;
2098 n_spilled += 1;
2099 spilled_fr_p = 1;
2102 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2103 if (regs_ever_live[regno] && ! call_used_regs[regno])
2105 SET_HARD_REG_BIT (mask, regno);
2106 spill_size += 8;
2107 n_spilled += 1;
2108 spilled_gr_p = 1;
2111 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2112 if (regs_ever_live[regno] && ! call_used_regs[regno])
2114 SET_HARD_REG_BIT (mask, regno);
2115 spill_size += 8;
2116 n_spilled += 1;
2119 /* Now come all special registers that might get saved in other
2120 general registers. */
2122 if (frame_pointer_needed)
2124 current_frame_info.reg_fp = find_gr_spill (1);
2125 /* If we did not get a register, then we take LOC79. This is guaranteed
2126 to be free, even if regs_ever_live is already set, because this is
2127 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2128 as we don't count loc79 above. */
2129 if (current_frame_info.reg_fp == 0)
2131 current_frame_info.reg_fp = LOC_REG (79);
2132 current_frame_info.n_local_regs++;
2136 if (! current_function_is_leaf)
2138 /* Emit a save of BR0 if we call other functions. Do this even
2139 if this function doesn't return, as EH depends on this to be
2140 able to unwind the stack. */
2141 SET_HARD_REG_BIT (mask, BR_REG (0));
2143 current_frame_info.reg_save_b0 = find_gr_spill (1);
2144 if (current_frame_info.reg_save_b0 == 0)
2146 spill_size += 8;
2147 n_spilled += 1;
2150 /* Similarly for ar.pfs. */
2151 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2152 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2153 if (current_frame_info.reg_save_ar_pfs == 0)
2155 extra_spill_size += 8;
2156 n_spilled += 1;
2159 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2160 registers are clobbered, so we fall back to the stack. */
2161 current_frame_info.reg_save_gp
2162 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2163 if (current_frame_info.reg_save_gp == 0)
2165 SET_HARD_REG_BIT (mask, GR_REG (1));
2166 spill_size += 8;
2167 n_spilled += 1;
2170 else
2172 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2174 SET_HARD_REG_BIT (mask, BR_REG (0));
2175 spill_size += 8;
2176 n_spilled += 1;
2179 if (regs_ever_live[AR_PFS_REGNUM])
2181 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2182 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2183 if (current_frame_info.reg_save_ar_pfs == 0)
2185 extra_spill_size += 8;
2186 n_spilled += 1;
2191 /* Unwind descriptor hackery: things are most efficient if we allocate
2192 consecutive GR save registers for RP, PFS, FP in that order. However,
2193 it is absolutely critical that FP get the only hard register that's
2194 guaranteed to be free, so we allocated it first. If all three
2195 happen to have been allocated hard regs, and are consecutive, rearrange them
2196 into the preferred order now. */
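  /* For example (illustrative): if reg_fp came back as loc3, reg_save_b0 as
     loc4 and reg_save_ar_pfs as loc5, the assignments below rotate them to
     reg_save_b0 = loc3, reg_save_ar_pfs = loc4, reg_fp = loc5, giving the
     RP, PFS, FP order that the unwind descriptors prefer.  */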
2197 if (current_frame_info.reg_fp != 0
2198 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2199 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2201 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2202 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2203 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2206 /* See if we need to store the predicate register block. */
2207 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2208 if (regs_ever_live[regno] && ! call_used_regs[regno])
2209 break;
2210 if (regno <= PR_REG (63))
2212 SET_HARD_REG_BIT (mask, PR_REG (0));
2213 current_frame_info.reg_save_pr = find_gr_spill (1);
2214 if (current_frame_info.reg_save_pr == 0)
2216 extra_spill_size += 8;
2217 n_spilled += 1;
2220 /* ??? Mark them all as used so that register renaming and such
2221 are free to use them. */
2222 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2223 regs_ever_live[regno] = 1;
2226 /* If we're forced to use st8.spill, we're forced to save and restore
2227 ar.unat as well. The check for existing liveness allows inline asm
2228 to touch ar.unat. */
2229 if (spilled_gr_p || cfun->machine->n_varargs
2230 || regs_ever_live[AR_UNAT_REGNUM])
2232 regs_ever_live[AR_UNAT_REGNUM] = 1;
2233 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2234 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2235 if (current_frame_info.reg_save_ar_unat == 0)
2237 extra_spill_size += 8;
2238 n_spilled += 1;
2242 if (regs_ever_live[AR_LC_REGNUM])
2244 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2245 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2246 if (current_frame_info.reg_save_ar_lc == 0)
2248 extra_spill_size += 8;
2249 n_spilled += 1;
2253 /* If we have an odd number of words of pretend arguments written to
2254 the stack, then the FR save area will be unaligned. We round the
2255 size of this area up to keep things 16 byte aligned. */
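  /* Worked example (illustrative, assuming IA64_STACK_ALIGN rounds up to a
     16-byte multiple): with 24 bytes of pretend arguments and at least one
     FR register spilled, the 24 is rounded up to 32 so that the 16-byte FR
     spill slots that follow stay 16-byte aligned.  */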
2256 if (spilled_fr_p)
2257 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2258 else
2259 pretend_args_size = current_function_pretend_args_size;
2261 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2262 + current_function_outgoing_args_size);
2263 total_size = IA64_STACK_ALIGN (total_size);
2265 /* We always use the 16-byte scratch area provided by the caller, but
2266 if we are a leaf function, there's no one to which we need to provide
2267 a scratch area. */
2268 if (current_function_is_leaf)
2269 total_size = MAX (0, total_size - 16);
2271 current_frame_info.total_size = total_size;
2272 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2273 current_frame_info.spill_size = spill_size;
2274 current_frame_info.extra_spill_size = extra_spill_size;
2275 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2276 current_frame_info.n_spilled = n_spilled;
2277 current_frame_info.initialized = reload_completed;
2280 /* Compute the initial difference between the specified pair of registers. */
2282 HOST_WIDE_INT
2283 ia64_initial_elimination_offset (int from, int to)
2285 HOST_WIDE_INT offset;
2287 ia64_compute_frame_size (get_frame_size ());
2288 switch (from)
2290 case FRAME_POINTER_REGNUM:
2291 if (to == HARD_FRAME_POINTER_REGNUM)
2293 if (current_function_is_leaf)
2294 offset = -current_frame_info.total_size;
2295 else
2296 offset = -(current_frame_info.total_size
2297 - current_function_outgoing_args_size - 16);
2299 else if (to == STACK_POINTER_REGNUM)
2301 if (current_function_is_leaf)
2302 offset = 0;
2303 else
2304 offset = 16 + current_function_outgoing_args_size;
2306 else
2307 abort ();
2308 break;
2310 case ARG_POINTER_REGNUM:
2311 /* Arguments start above the 16 byte save area, unless stdarg,
2312 in which case we store through the 16 byte save area. */
2313 if (to == HARD_FRAME_POINTER_REGNUM)
2314 offset = 16 - current_function_pretend_args_size;
2315 else if (to == STACK_POINTER_REGNUM)
2316 offset = (current_frame_info.total_size
2317 + 16 - current_function_pretend_args_size);
2318 else
2319 abort ();
2320 break;
2322 default:
2323 abort ();
2326 return offset;
2329 /* If there are more than a trivial number of register spills, we use
2330 two interleaved iterators so that we can get two memory references
2331 per insn group.
2333 In order to simplify things in the prologue and epilogue expanders,
2334 we use helper functions to fix up the memory references after the
2335 fact with the appropriate offsets to a POST_MODIFY memory mode.
2336 The following data structure tracks the state of the two iterators
2337 while insns are being emitted. */
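/* Illustrative note (not in the original source): with three or more
   spills, n_iter is 2 and the save/restore loops alternate between
   iter_reg[0] and iter_reg[1], so consecutive st8.spill/ld8.fill insns
   use different base registers and can issue in the same insn group.  */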
2339 struct spill_fill_data
2341 rtx init_after; /* point at which to emit initializations */
2342 rtx init_reg[2]; /* initial base register */
2343 rtx iter_reg[2]; /* the iterator registers */
2344 rtx *prev_addr[2]; /* address of last memory use */
2345 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2346 HOST_WIDE_INT prev_off[2]; /* last offset */
2347 int n_iter; /* number of iterators in use */
2348 int next_iter; /* next iterator to use */
2349 unsigned int save_gr_used_mask;
2352 static struct spill_fill_data spill_fill_data;
2354 static void
2355 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2357 int i;
2359 spill_fill_data.init_after = get_last_insn ();
2360 spill_fill_data.init_reg[0] = init_reg;
2361 spill_fill_data.init_reg[1] = init_reg;
2362 spill_fill_data.prev_addr[0] = NULL;
2363 spill_fill_data.prev_addr[1] = NULL;
2364 spill_fill_data.prev_insn[0] = NULL;
2365 spill_fill_data.prev_insn[1] = NULL;
2366 spill_fill_data.prev_off[0] = cfa_off;
2367 spill_fill_data.prev_off[1] = cfa_off;
2368 spill_fill_data.next_iter = 0;
2369 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2371 spill_fill_data.n_iter = 1 + (n_spills > 2);
2372 for (i = 0; i < spill_fill_data.n_iter; ++i)
2374 int regno = next_scratch_gr_reg ();
2375 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2376 current_frame_info.gr_used_mask |= 1 << regno;
2380 static void
2381 finish_spill_pointers (void)
2383 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2386 static rtx
2387 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2389 int iter = spill_fill_data.next_iter;
2390 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2391 rtx disp_rtx = GEN_INT (disp);
2392 rtx mem;
2394 if (spill_fill_data.prev_addr[iter])
2396 if (CONST_OK_FOR_N (disp))
2398 *spill_fill_data.prev_addr[iter]
2399 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2400 gen_rtx_PLUS (DImode,
2401 spill_fill_data.iter_reg[iter],
2402 disp_rtx));
2403 REG_NOTES (spill_fill_data.prev_insn[iter])
2404 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2405 REG_NOTES (spill_fill_data.prev_insn[iter]));
2407 else
2409 /* ??? Could use register post_modify for loads. */
2410 if (! CONST_OK_FOR_I (disp))
2412 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2413 emit_move_insn (tmp, disp_rtx);
2414 disp_rtx = tmp;
2416 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2417 spill_fill_data.iter_reg[iter], disp_rtx));
2420 /* Micro-optimization: if we've created a frame pointer, it's at
2421 CFA 0, which may allow the real iterator to be initialized lower,
2422 slightly increasing parallelism. Also, if there are few saves
2423 it may eliminate the iterator entirely. */
2424 else if (disp == 0
2425 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2426 && frame_pointer_needed)
2428 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2429 set_mem_alias_set (mem, get_varargs_alias_set ());
2430 return mem;
2432 else
2434 rtx seq, insn;
2436 if (disp == 0)
2437 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2438 spill_fill_data.init_reg[iter]);
2439 else
2441 start_sequence ();
2443 if (! CONST_OK_FOR_I (disp))
2445 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2446 emit_move_insn (tmp, disp_rtx);
2447 disp_rtx = tmp;
2450 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2451 spill_fill_data.init_reg[iter],
2452 disp_rtx));
2454 seq = get_insns ();
2455 end_sequence ();
2458 /* Careful for being the first insn in a sequence. */
2459 if (spill_fill_data.init_after)
2460 insn = emit_insn_after (seq, spill_fill_data.init_after);
2461 else
2463 rtx first = get_insns ();
2464 if (first)
2465 insn = emit_insn_before (seq, first);
2466 else
2467 insn = emit_insn (seq);
2469 spill_fill_data.init_after = insn;
2471 /* If DISP is 0, we may or may not have a further adjustment
2472 afterward. If we do, then the load/store insn may be modified
2473 to be a post-modify. If we don't, then this copy may be
2474 eliminated by copyprop_hardreg_forward, which makes this
2475 insn garbage, which runs afoul of the sanity check in
2476 propagate_one_insn. So mark this insn as legal to delete. */
2477 if (disp == 0)
2478 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2479 REG_NOTES (insn));
2482 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2484 /* ??? Not all of the spills are for varargs, but some of them are.
2485 The rest of the spills belong in an alias set of their own. But
2486 it doesn't actually hurt to include them here. */
2487 set_mem_alias_set (mem, get_varargs_alias_set ());
2489 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2490 spill_fill_data.prev_off[iter] = cfa_off;
2492 if (++iter >= spill_fill_data.n_iter)
2493 iter = 0;
2494 spill_fill_data.next_iter = iter;
2496 return mem;
2499 static void
2500 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2501 rtx frame_reg)
2503 int iter = spill_fill_data.next_iter;
2504 rtx mem, insn;
2506 mem = spill_restore_mem (reg, cfa_off);
2507 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2508 spill_fill_data.prev_insn[iter] = insn;
2510 if (frame_reg)
2512 rtx base;
2513 HOST_WIDE_INT off;
2515 RTX_FRAME_RELATED_P (insn) = 1;
2517 /* Don't even pretend that the unwind code can intuit its way
2518 through a pair of interleaved post_modify iterators. Just
2519 provide the correct answer. */
2521 if (frame_pointer_needed)
2523 base = hard_frame_pointer_rtx;
2524 off = - cfa_off;
2526 else
2528 base = stack_pointer_rtx;
2529 off = current_frame_info.total_size - cfa_off;
2532 REG_NOTES (insn)
2533 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2534 gen_rtx_SET (VOIDmode,
2535 gen_rtx_MEM (GET_MODE (reg),
2536 plus_constant (base, off)),
2537 frame_reg),
2538 REG_NOTES (insn));
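  /* Worked example (illustrative): a slot recorded at CFA_OFF is the byte at
     CFA - CFA_OFF.  With a frame pointer (which this prologue sets to the
     incoming SP, i.e. the CFA), a save at cfa_off == 32 is annotated as
     [hard_frame_pointer - 32]; without one and with total_size == 160, the
     same slot is annotated as [sp + 128], since sp == CFA - 160.  */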
2542 static void
2543 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2545 int iter = spill_fill_data.next_iter;
2546 rtx insn;
2548 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2549 GEN_INT (cfa_off)));
2550 spill_fill_data.prev_insn[iter] = insn;
2553 /* Wrapper functions that discard the CONST_INT spill offset. These
2554 exist so that we can give gr_spill/gr_fill the offset they need and
2555 use a consistent function interface. */
2557 static rtx
2558 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2560 return gen_movdi (dest, src);
2563 static rtx
2564 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2566 return gen_fr_spill (dest, src);
2569 static rtx
2570 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2572 return gen_fr_restore (dest, src);
2575 /* Called after register allocation to add any instructions needed for the
2576 prologue. Using a prologue insn is favored compared to putting all of the
2577 instructions in output_function_prologue(), since it allows the scheduler
2578 to intermix instructions with the saves of the caller saved registers. In
2579 some cases, it might be necessary to emit a barrier instruction as the last
2580 insn to prevent such scheduling.
2582 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2583 so that the debug info generation code can handle them properly.
2585 The register save area is laid out like so:
2586 cfa+16
2587 [ varargs spill area ]
2588 [ fr register spill area ]
2589 [ br register spill area ]
2590 [ ar register spill area ]
2591 [ pr register spill area ]
2592 [ gr register spill area ] */
2594 /* ??? Get inefficient code when the frame size is larger than can fit in an
2595 adds instruction. */
2597 void
2598 ia64_expand_prologue (void)
2600 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2601 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2602 rtx reg, alt_reg;
2604 ia64_compute_frame_size (get_frame_size ());
2605 last_scratch_gr_reg = 15;
2607 /* If there is no epilogue, then we don't need some prologue insns.
2608 We need to avoid emitting the dead prologue insns, because flow
2609 will complain about them. */
2610 if (optimize)
2612 edge e;
2614 for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
2615 if ((e->flags & EDGE_FAKE) == 0
2616 && (e->flags & EDGE_FALLTHRU) != 0)
2617 break;
2618 epilogue_p = (e != NULL);
2620 else
2621 epilogue_p = 1;
2623 /* Set the local, input, and output register names. We need to do this
2624 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2625 half. If we use in/loc/out register names, then we get assembler errors
2626 in crtn.S because there is no alloc insn or regstk directive in there. */
2627 if (! TARGET_REG_NAMES)
2629 int inputs = current_frame_info.n_input_regs;
2630 int locals = current_frame_info.n_local_regs;
2631 int outputs = current_frame_info.n_output_regs;
2633 for (i = 0; i < inputs; i++)
2634 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2635 for (i = 0; i < locals; i++)
2636 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2637 for (i = 0; i < outputs; i++)
2638 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2641 /* Set the frame pointer register name. The regnum is logically loc79,
2642 but of course we'll not have allocated that many locals. Rather than
2643 worrying about renumbering the existing rtxs, we adjust the name. */
2644 /* ??? This code means that we can never use one local register when
2645 there is a frame pointer. loc79 gets wasted in this case, as it is
2646 renamed to a register that will never be used. See also the try_locals
2647 code in find_gr_spill. */
2648 if (current_frame_info.reg_fp)
2650 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2651 reg_names[HARD_FRAME_POINTER_REGNUM]
2652 = reg_names[current_frame_info.reg_fp];
2653 reg_names[current_frame_info.reg_fp] = tmp;
2656 /* We don't need an alloc instruction if we've used no outputs or locals. */
2657 if (current_frame_info.n_local_regs == 0
2658 && current_frame_info.n_output_regs == 0
2659 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2660 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2662 /* If there is no alloc, but there are input registers used, then we
2663 need a .regstk directive. */
2664 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2665 ar_pfs_save_reg = NULL_RTX;
2667 else
2669 current_frame_info.need_regstk = 0;
2671 if (current_frame_info.reg_save_ar_pfs)
2672 regno = current_frame_info.reg_save_ar_pfs;
2673 else
2674 regno = next_scratch_gr_reg ();
2675 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2677 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2678 GEN_INT (current_frame_info.n_input_regs),
2679 GEN_INT (current_frame_info.n_local_regs),
2680 GEN_INT (current_frame_info.n_output_regs),
2681 GEN_INT (current_frame_info.n_rotate_regs)));
2682 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2685 /* Set up frame pointer, stack pointer, and spill iterators. */
2687 n_varargs = cfun->machine->n_varargs;
2688 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2689 stack_pointer_rtx, 0);
2691 if (frame_pointer_needed)
2693 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2694 RTX_FRAME_RELATED_P (insn) = 1;
2697 if (current_frame_info.total_size != 0)
2699 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2700 rtx offset;
2702 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2703 offset = frame_size_rtx;
2704 else
2706 regno = next_scratch_gr_reg ();
2707 offset = gen_rtx_REG (DImode, regno);
2708 emit_move_insn (offset, frame_size_rtx);
2711 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2712 stack_pointer_rtx, offset));
2714 if (! frame_pointer_needed)
2716 RTX_FRAME_RELATED_P (insn) = 1;
2717 if (GET_CODE (offset) != CONST_INT)
2719 REG_NOTES (insn)
2720 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2721 gen_rtx_SET (VOIDmode,
2722 stack_pointer_rtx,
2723 gen_rtx_PLUS (DImode,
2724 stack_pointer_rtx,
2725 frame_size_rtx)),
2726 REG_NOTES (insn));
2730 /* ??? At this point we must generate a magic insn that appears to
2731 modify the stack pointer, the frame pointer, and all spill
2732 iterators. This would allow the most scheduling freedom. For
2733 now, just hard stop. */
2734 emit_insn (gen_blockage ());
2737 /* Must copy out ar.unat before doing any integer spills. */
2738 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2740 if (current_frame_info.reg_save_ar_unat)
2741 ar_unat_save_reg
2742 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2743 else
2745 alt_regno = next_scratch_gr_reg ();
2746 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2747 current_frame_info.gr_used_mask |= 1 << alt_regno;
2750 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2751 insn = emit_move_insn (ar_unat_save_reg, reg);
2752 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2754 /* Even if we're not going to generate an epilogue, we still
2755 need to save the register so that EH works. */
2756 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2757 emit_insn (gen_prologue_use (ar_unat_save_reg));
2759 else
2760 ar_unat_save_reg = NULL_RTX;
2762 /* Spill all varargs registers. Do this before spilling any GR registers,
2763 since we want the UNAT bits for the GR registers to override the UNAT
2764 bits from varargs, which we don't care about. */
2766 cfa_off = -16;
2767 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2769 reg = gen_rtx_REG (DImode, regno);
2770 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2773 /* Locate the bottom of the register save area. */
2774 cfa_off = (current_frame_info.spill_cfa_off
2775 + current_frame_info.spill_size
2776 + current_frame_info.extra_spill_size);
2778 /* Save the predicate register block either in a register or in memory. */
2779 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2781 reg = gen_rtx_REG (DImode, PR_REG (0));
2782 if (current_frame_info.reg_save_pr != 0)
2784 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2785 insn = emit_move_insn (alt_reg, reg);
2787 /* ??? Denote pr spill/fill by a DImode move that modifies all
2788 64 hard registers. */
2789 RTX_FRAME_RELATED_P (insn) = 1;
2790 REG_NOTES (insn)
2791 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2792 gen_rtx_SET (VOIDmode, alt_reg, reg),
2793 REG_NOTES (insn));
2795 /* Even if we're not going to generate an epilogue, we still
2796 need to save the register so that EH works. */
2797 if (! epilogue_p)
2798 emit_insn (gen_prologue_use (alt_reg));
2800 else
2802 alt_regno = next_scratch_gr_reg ();
2803 alt_reg = gen_rtx_REG (DImode, alt_regno);
2804 insn = emit_move_insn (alt_reg, reg);
2805 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2806 cfa_off -= 8;
2810 /* Handle AR regs in numerical order. All of them get special handling. */
2811 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2812 && current_frame_info.reg_save_ar_unat == 0)
2814 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2815 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2816 cfa_off -= 8;
2819 /* The alloc insn already copied ar.pfs into a general register. The
2820 only thing we have to do now is copy that register to a stack slot
2821 if we'd not allocated a local register for the job. */
2822 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2823 && current_frame_info.reg_save_ar_pfs == 0)
2825 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2826 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2827 cfa_off -= 8;
2830 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2832 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2833 if (current_frame_info.reg_save_ar_lc != 0)
2835 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2836 insn = emit_move_insn (alt_reg, reg);
2837 RTX_FRAME_RELATED_P (insn) = 1;
2839 /* Even if we're not going to generate an epilogue, we still
2840 need to save the register so that EH works. */
2841 if (! epilogue_p)
2842 emit_insn (gen_prologue_use (alt_reg));
2844 else
2846 alt_regno = next_scratch_gr_reg ();
2847 alt_reg = gen_rtx_REG (DImode, alt_regno);
2848 emit_move_insn (alt_reg, reg);
2849 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2850 cfa_off -= 8;
2854 if (current_frame_info.reg_save_gp)
2856 insn = emit_move_insn (gen_rtx_REG (DImode,
2857 current_frame_info.reg_save_gp),
2858 pic_offset_table_rtx);
2859 /* We don't know for sure yet if this is actually needed, since
2860 we've not split the PIC call patterns. If all of the calls
2861 are indirect, and not followed by any uses of the gp, then
2862 this save is dead. Allow it to go away. */
2863 REG_NOTES (insn)
2864 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2867 /* We should now be at the base of the gr/br/fr spill area. */
2868 if (cfa_off != (current_frame_info.spill_cfa_off
2869 + current_frame_info.spill_size))
2870 abort ();
2872 /* Spill all general registers. */
2873 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2874 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2876 reg = gen_rtx_REG (DImode, regno);
2877 do_spill (gen_gr_spill, reg, cfa_off, reg);
2878 cfa_off -= 8;
2881 /* Handle BR0 specially -- it may be getting stored permanently in
2882 some GR register. */
2883 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2885 reg = gen_rtx_REG (DImode, BR_REG (0));
2886 if (current_frame_info.reg_save_b0 != 0)
2888 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2889 insn = emit_move_insn (alt_reg, reg);
2890 RTX_FRAME_RELATED_P (insn) = 1;
2892 /* Even if we're not going to generate an epilogue, we still
2893 need to save the register so that EH works. */
2894 if (! epilogue_p)
2895 emit_insn (gen_prologue_use (alt_reg));
2897 else
2899 alt_regno = next_scratch_gr_reg ();
2900 alt_reg = gen_rtx_REG (DImode, alt_regno);
2901 emit_move_insn (alt_reg, reg);
2902 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2903 cfa_off -= 8;
2907 /* Spill the rest of the BR registers. */
2908 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2909 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2911 alt_regno = next_scratch_gr_reg ();
2912 alt_reg = gen_rtx_REG (DImode, alt_regno);
2913 reg = gen_rtx_REG (DImode, regno);
2914 emit_move_insn (alt_reg, reg);
2915 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2916 cfa_off -= 8;
2919 /* Align the frame and spill all FR registers. */
2920 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2921 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2923 if (cfa_off & 15)
2924 abort ();
2925 reg = gen_rtx_REG (XFmode, regno);
2926 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2927 cfa_off -= 16;
2930 if (cfa_off != current_frame_info.spill_cfa_off)
2931 abort ();
2933 finish_spill_pointers ();
2936 /* Called after register allocation to add any instructions needed for the
2937 epilogue. Using an epilogue insn is favored compared to putting all of the
2938 instructions in output_function_epilogue(), since it allows the scheduler
2939 to intermix instructions with the restores of the caller saved registers. In
2940 some cases, it might be necessary to emit a barrier instruction as the last
2941 insn to prevent such scheduling. */
2943 void
2944 ia64_expand_epilogue (int sibcall_p)
2946 rtx insn, reg, alt_reg, ar_unat_save_reg;
2947 int regno, alt_regno, cfa_off;
2949 ia64_compute_frame_size (get_frame_size ());
2951 /* If there is a frame pointer, then we use it instead of the stack
2952 pointer, so that the stack pointer does not need to be valid when
2953 the epilogue starts. See EXIT_IGNORE_STACK. */
2954 if (frame_pointer_needed)
2955 setup_spill_pointers (current_frame_info.n_spilled,
2956 hard_frame_pointer_rtx, 0);
2957 else
2958 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2959 current_frame_info.total_size);
2961 if (current_frame_info.total_size != 0)
2963 /* ??? At this point we must generate a magic insn that appears to
2964 modify the spill iterators and the frame pointer. This would
2965 allow the most scheduling freedom. For now, just hard stop. */
2966 emit_insn (gen_blockage ());
2969 /* Locate the bottom of the register save area. */
2970 cfa_off = (current_frame_info.spill_cfa_off
2971 + current_frame_info.spill_size
2972 + current_frame_info.extra_spill_size);
2974 /* Restore the predicate registers. */
2975 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2977 if (current_frame_info.reg_save_pr != 0)
2978 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2979 else
2981 alt_regno = next_scratch_gr_reg ();
2982 alt_reg = gen_rtx_REG (DImode, alt_regno);
2983 do_restore (gen_movdi_x, alt_reg, cfa_off);
2984 cfa_off -= 8;
2986 reg = gen_rtx_REG (DImode, PR_REG (0));
2987 emit_move_insn (reg, alt_reg);
2990 /* Restore the application registers. */
2992 /* Load the saved unat from the stack, but do not restore it until
2993 after the GRs have been restored. */
2994 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2996 if (current_frame_info.reg_save_ar_unat != 0)
2997 ar_unat_save_reg
2998 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2999 else
3001 alt_regno = next_scratch_gr_reg ();
3002 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3003 current_frame_info.gr_used_mask |= 1 << alt_regno;
3004 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3005 cfa_off -= 8;
3008 else
3009 ar_unat_save_reg = NULL_RTX;
3011 if (current_frame_info.reg_save_ar_pfs != 0)
3013 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3014 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3015 emit_move_insn (reg, alt_reg);
3017 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3019 alt_regno = next_scratch_gr_reg ();
3020 alt_reg = gen_rtx_REG (DImode, alt_regno);
3021 do_restore (gen_movdi_x, alt_reg, cfa_off);
3022 cfa_off -= 8;
3023 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3024 emit_move_insn (reg, alt_reg);
3027 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3029 if (current_frame_info.reg_save_ar_lc != 0)
3030 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3031 else
3033 alt_regno = next_scratch_gr_reg ();
3034 alt_reg = gen_rtx_REG (DImode, alt_regno);
3035 do_restore (gen_movdi_x, alt_reg, cfa_off);
3036 cfa_off -= 8;
3038 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3039 emit_move_insn (reg, alt_reg);
3042 /* We should now be at the base of the gr/br/fr spill area. */
3043 if (cfa_off != (current_frame_info.spill_cfa_off
3044 + current_frame_info.spill_size))
3045 abort ();
3047 /* The GP may be stored on the stack in the prologue, but it's
3048 never restored in the epilogue. Skip the stack slot. */
3049 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3050 cfa_off -= 8;
3052 /* Restore all general registers. */
3053 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3054 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3056 reg = gen_rtx_REG (DImode, regno);
3057 do_restore (gen_gr_restore, reg, cfa_off);
3058 cfa_off -= 8;
3061 /* Restore the branch registers. Handle B0 specially, as it may
3062 have gotten stored in some GR register. */
3063 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3065 if (current_frame_info.reg_save_b0 != 0)
3066 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3067 else
3069 alt_regno = next_scratch_gr_reg ();
3070 alt_reg = gen_rtx_REG (DImode, alt_regno);
3071 do_restore (gen_movdi_x, alt_reg, cfa_off);
3072 cfa_off -= 8;
3074 reg = gen_rtx_REG (DImode, BR_REG (0));
3075 emit_move_insn (reg, alt_reg);
3078 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3079 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3081 alt_regno = next_scratch_gr_reg ();
3082 alt_reg = gen_rtx_REG (DImode, alt_regno);
3083 do_restore (gen_movdi_x, alt_reg, cfa_off);
3084 cfa_off -= 8;
3085 reg = gen_rtx_REG (DImode, regno);
3086 emit_move_insn (reg, alt_reg);
3089 /* Restore floating point registers. */
3090 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3091 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3093 if (cfa_off & 15)
3094 abort ();
3095 reg = gen_rtx_REG (XFmode, regno);
3096 do_restore (gen_fr_restore_x, reg, cfa_off);
3097 cfa_off -= 16;
3100 /* Restore ar.unat for real. */
3101 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3103 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3104 emit_move_insn (reg, ar_unat_save_reg);
3107 if (cfa_off != current_frame_info.spill_cfa_off)
3108 abort ();
3110 finish_spill_pointers ();
3112 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3114 /* ??? At this point we must generate a magic insn that appears to
3115 modify the spill iterators, the stack pointer, and the frame
3116 pointer. This would allow the most scheduling freedom. For now,
3117 just hard stop. */
3118 emit_insn (gen_blockage ());
3121 if (cfun->machine->ia64_eh_epilogue_sp)
3122 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3123 else if (frame_pointer_needed)
3125 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3126 RTX_FRAME_RELATED_P (insn) = 1;
3128 else if (current_frame_info.total_size)
3130 rtx offset, frame_size_rtx;
3132 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3133 if (CONST_OK_FOR_I (current_frame_info.total_size))
3134 offset = frame_size_rtx;
3135 else
3137 regno = next_scratch_gr_reg ();
3138 offset = gen_rtx_REG (DImode, regno);
3139 emit_move_insn (offset, frame_size_rtx);
3142 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3143 offset));
3145 RTX_FRAME_RELATED_P (insn) = 1;
3146 if (GET_CODE (offset) != CONST_INT)
3148 REG_NOTES (insn)
3149 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3150 gen_rtx_SET (VOIDmode,
3151 stack_pointer_rtx,
3152 gen_rtx_PLUS (DImode,
3153 stack_pointer_rtx,
3154 frame_size_rtx)),
3155 REG_NOTES (insn));
3159 if (cfun->machine->ia64_eh_epilogue_bsp)
3160 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3162 if (! sibcall_p)
3163 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3164 else
3166 int fp = GR_REG (2);
3167 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
3168 first available call-clobbered register. If a frame pointer was
3169 needed, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3170 so we have to make sure we're using the string "r2" when emitting
3171 the register name for the assembler. */
3172 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3173 fp = HARD_FRAME_POINTER_REGNUM;
3175 /* We must emit an alloc to force the input registers to become output
3176 registers. Otherwise, if the callee tries to pass its parameters
3177 through to another call without an intervening alloc, then these
3178 values get lost. */
3179 /* ??? We don't need to preserve all input registers. We only need to
3180 preserve those input registers used as arguments to the sibling call.
3181 It is unclear how to compute that number here. */
3182 if (current_frame_info.n_input_regs != 0)
3183 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3184 const0_rtx, const0_rtx,
3185 GEN_INT (current_frame_info.n_input_regs),
3186 const0_rtx));
3190 /* Return 1 if br.ret can do all the work required to return from a
3191 function. */
3193 int
3194 ia64_direct_return (void)
3196 if (reload_completed && ! frame_pointer_needed)
3198 ia64_compute_frame_size (get_frame_size ());
3200 return (current_frame_info.total_size == 0
3201 && current_frame_info.n_spilled == 0
3202 && current_frame_info.reg_save_b0 == 0
3203 && current_frame_info.reg_save_pr == 0
3204 && current_frame_info.reg_save_ar_pfs == 0
3205 && current_frame_info.reg_save_ar_unat == 0
3206 && current_frame_info.reg_save_ar_lc == 0);
3208 return 0;
3211 /* Return the magic cookie that we use to hold the return address
3212 during early compilation. */
3214 rtx
3215 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3217 if (count != 0)
3218 return NULL;
3219 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3222 /* Split this value after reload, now that we know where the return
3223 address is saved. */
3225 void
3226 ia64_split_return_addr_rtx (rtx dest)
3228 rtx src;
3230 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3232 if (current_frame_info.reg_save_b0 != 0)
3233 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3234 else
3236 HOST_WIDE_INT off;
3237 unsigned int regno;
3239 /* Compute offset from CFA for BR0. */
3240 /* ??? Must be kept in sync with ia64_expand_prologue. */
3241 off = (current_frame_info.spill_cfa_off
3242 + current_frame_info.spill_size);
3243 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3244 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3245 off -= 8;
3247 /* Convert CFA offset to a register based offset. */
3248 if (frame_pointer_needed)
3249 src = hard_frame_pointer_rtx;
3250 else
3252 src = stack_pointer_rtx;
3253 off += current_frame_info.total_size;
3256 /* Load address into scratch register. */
3257 if (CONST_OK_FOR_I (off))
3258 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3259 else
3261 emit_move_insn (dest, GEN_INT (off));
3262 emit_insn (gen_adddi3 (dest, src, dest));
3265 src = gen_rtx_MEM (Pmode, dest);
3268 else
3269 src = gen_rtx_REG (DImode, BR_REG (0));
3271 emit_move_insn (dest, src);
3274 int
3275 ia64_hard_regno_rename_ok (int from, int to)
3277 /* Don't clobber any of the registers we reserved for the prologue. */
3278 if (to == current_frame_info.reg_fp
3279 || to == current_frame_info.reg_save_b0
3280 || to == current_frame_info.reg_save_pr
3281 || to == current_frame_info.reg_save_ar_pfs
3282 || to == current_frame_info.reg_save_ar_unat
3283 || to == current_frame_info.reg_save_ar_lc)
3284 return 0;
3286 if (from == current_frame_info.reg_fp
3287 || from == current_frame_info.reg_save_b0
3288 || from == current_frame_info.reg_save_pr
3289 || from == current_frame_info.reg_save_ar_pfs
3290 || from == current_frame_info.reg_save_ar_unat
3291 || from == current_frame_info.reg_save_ar_lc)
3292 return 0;
3294 /* Don't use output registers outside the register frame. */
3295 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3296 return 0;
3298 /* Retain even/oddness on predicate register pairs. */
3299 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3300 return (from & 1) == (to & 1);
3302 return 1;
3305 /* Target hook for assembling integer objects. Handle word-sized
3306 aligned objects and detect the cases when @fptr is needed. */
3308 static bool
3309 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3311 if (size == POINTER_SIZE / BITS_PER_UNIT
3312 && aligned_p
3313 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3314 && GET_CODE (x) == SYMBOL_REF
3315 && SYMBOL_REF_FUNCTION_P (x))
3317 if (POINTER_SIZE == 32)
3318 fputs ("\tdata4\t@fptr(", asm_out_file);
3319 else
3320 fputs ("\tdata8\t@fptr(", asm_out_file);
3321 output_addr_const (asm_out_file, x);
3322 fputs (")\n", asm_out_file);
3323 return true;
3325 return default_assemble_integer (x, size, aligned_p);
3328 /* Emit the function prologue. */
3330 static void
3331 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3333 int mask, grsave, grsave_prev;
3335 if (current_frame_info.need_regstk)
3336 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3337 current_frame_info.n_input_regs,
3338 current_frame_info.n_local_regs,
3339 current_frame_info.n_output_regs,
3340 current_frame_info.n_rotate_regs);
3342 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3343 return;
3345 /* Emit the .prologue directive. */
3347 mask = 0;
3348 grsave = grsave_prev = 0;
3349 if (current_frame_info.reg_save_b0 != 0)
3351 mask |= 8;
3352 grsave = grsave_prev = current_frame_info.reg_save_b0;
3354 if (current_frame_info.reg_save_ar_pfs != 0
3355 && (grsave_prev == 0
3356 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3358 mask |= 4;
3359 if (grsave_prev == 0)
3360 grsave = current_frame_info.reg_save_ar_pfs;
3361 grsave_prev = current_frame_info.reg_save_ar_pfs;
3363 if (current_frame_info.reg_fp != 0
3364 && (grsave_prev == 0
3365 || current_frame_info.reg_fp == grsave_prev + 1))
3367 mask |= 2;
3368 if (grsave_prev == 0)
3369 grsave = HARD_FRAME_POINTER_REGNUM;
3370 grsave_prev = current_frame_info.reg_fp;
3372 if (current_frame_info.reg_save_pr != 0
3373 && (grsave_prev == 0
3374 || current_frame_info.reg_save_pr == grsave_prev + 1))
3376 mask |= 1;
3377 if (grsave_prev == 0)
3378 grsave = current_frame_info.reg_save_pr;
3381 if (mask && TARGET_GNU_AS)
3382 fprintf (file, "\t.prologue %d, %d\n", mask,
3383 ia64_dbx_register_number (grsave));
3384 else
3385 fputs ("\t.prologue\n", file);
3387 /* Emit a .spill directive, if necessary, to relocate the base of
3388 the register spill area. */
3389 if (current_frame_info.spill_cfa_off != -16)
3390 fprintf (file, "\t.spill %ld\n",
3391 (long) (current_frame_info.spill_cfa_off
3392 + current_frame_info.spill_size));
3395 /* Emit the .body directive at the scheduled end of the prologue. */
3397 static void
3398 ia64_output_function_end_prologue (FILE *file)
3400 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3401 return;
3403 fputs ("\t.body\n", file);
3406 /* Emit the function epilogue. */
3408 static void
3409 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3410 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3412 int i;
3414 if (current_frame_info.reg_fp)
3416 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3417 reg_names[HARD_FRAME_POINTER_REGNUM]
3418 = reg_names[current_frame_info.reg_fp];
3419 reg_names[current_frame_info.reg_fp] = tmp;
3421 if (! TARGET_REG_NAMES)
3423 for (i = 0; i < current_frame_info.n_input_regs; i++)
3424 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3425 for (i = 0; i < current_frame_info.n_local_regs; i++)
3426 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3427 for (i = 0; i < current_frame_info.n_output_regs; i++)
3428 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3431 current_frame_info.initialized = 0;
3434 int
3435 ia64_dbx_register_number (int regno)
3437 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3438 from its home at loc79 to something inside the register frame. We
3439 must perform the same renumbering here for the debug info. */
3440 if (current_frame_info.reg_fp)
3442 if (regno == HARD_FRAME_POINTER_REGNUM)
3443 regno = current_frame_info.reg_fp;
3444 else if (regno == current_frame_info.reg_fp)
3445 regno = HARD_FRAME_POINTER_REGNUM;
3448 if (IN_REGNO_P (regno))
3449 return 32 + regno - IN_REG (0);
3450 else if (LOC_REGNO_P (regno))
3451 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3452 else if (OUT_REGNO_P (regno))
3453 return (32 + current_frame_info.n_input_regs
3454 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3455 else
3456 return regno;
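/* Example (illustrative): for a function with 2 input, 3 local and 1 output
   register, the mapping above gives in0-in1 -> 32-33, loc0-loc2 -> 34-36 and
   out0 -> 37; every other register keeps its hard register number.  */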
3459 void
3460 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3462 rtx addr_reg, eight = GEN_INT (8);
3464 /* The Intel assembler requires that the global __ia64_trampoline symbol
3465 be declared explicitly.  */
3466 if (!TARGET_GNU_AS)
3468 static bool declared_ia64_trampoline = false;
3470 if (!declared_ia64_trampoline)
3472 declared_ia64_trampoline = true;
3473 (*targetm.asm_out.globalize_label) (asm_out_file,
3474 "__ia64_trampoline");
3478 /* Load up our iterator. */
3479 addr_reg = gen_reg_rtx (Pmode);
3480 emit_move_insn (addr_reg, addr);
3482 /* The first two words are the fake descriptor:
3483 __ia64_trampoline, ADDR+16. */
3484 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3485 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3486 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3488 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3489 copy_to_reg (plus_constant (addr, 16)));
3490 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3492 /* The third word is the target descriptor. */
3493 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3494 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3496 /* The fourth word is the static chain. */
3497 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
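/* Recap (illustrative, not in the original source): after the stores above
   the trampoline at ADDR looks like
	[ADDR +  0]  __ia64_trampoline	  } fake descriptor handed to callers
	[ADDR +  8]  ADDR + 16		  }
	[ADDR + 16]  FNADDR (the target descriptor)
	[ADDR + 24]  STATIC_CHAIN
   so a call through the fake descriptor enters __ia64_trampoline with gp
   pointing at the real target data.  */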
3500 /* Do any needed setup for a variadic function. CUM has not been updated
3501 for the last named argument which has type TYPE and mode MODE.
3503 We generate the actual spill instructions during prologue generation. */
3505 static void
3506 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3507 tree type, int * pretend_size,
3508 int second_time ATTRIBUTE_UNUSED)
3510 CUMULATIVE_ARGS next_cum = *cum;
3512 /* Skip the current argument. */
3513 ia64_function_arg_advance (&next_cum, mode, type, 1);
3515 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3517 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3518 *pretend_size = n * UNITS_PER_WORD;
3519 cfun->machine->n_varargs = n;
3523 /* Check whether TYPE is a homogeneous floating point aggregate. If
3524 it is, return the mode of the floating point type that appears
3525 in all leaves. If it is not, return VOIDmode.
3527 An aggregate is a homogeneous floating point aggregate if all
3528 fields/elements in it have the same floating point type (e.g.,
3529 SFmode). 128-bit quad-precision floats are excluded. */
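/* Examples (illustrative): "struct { float x, y, z; }" is an HFA with
   element mode SFmode, and "struct { double r, i; }" one with DFmode;
   "struct { float x; double y; }" is not an HFA because the leaf modes
   differ.  */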
3531 static enum machine_mode
3532 hfa_element_mode (tree type, int nested)
3534 enum machine_mode element_mode = VOIDmode;
3535 enum machine_mode mode;
3536 enum tree_code code = TREE_CODE (type);
3537 int know_element_mode = 0;
3538 tree t;
3540 switch (code)
3542 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3543 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3544 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3545 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
3546 case FUNCTION_TYPE:
3547 return VOIDmode;
3549 /* Fortran complex types are supposed to be HFAs, so we need to handle
3550 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3551 types though. */
3552 case COMPLEX_TYPE:
3553 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3554 && TYPE_MODE (type) != TCmode)
3555 return GET_MODE_INNER (TYPE_MODE (type));
3556 else
3557 return VOIDmode;
3559 case REAL_TYPE:
3560 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3561 mode if this is contained within an aggregate. */
3562 if (nested && TYPE_MODE (type) != TFmode)
3563 return TYPE_MODE (type);
3564 else
3565 return VOIDmode;
3567 case ARRAY_TYPE:
3568 return hfa_element_mode (TREE_TYPE (type), 1);
3570 case RECORD_TYPE:
3571 case UNION_TYPE:
3572 case QUAL_UNION_TYPE:
3573 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3575 if (TREE_CODE (t) != FIELD_DECL)
3576 continue;
3578 mode = hfa_element_mode (TREE_TYPE (t), 1);
3579 if (know_element_mode)
3581 if (mode != element_mode)
3582 return VOIDmode;
3584 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3585 return VOIDmode;
3586 else
3588 know_element_mode = 1;
3589 element_mode = mode;
3592 return element_mode;
3594 default:
3595 /* If we reach here, we probably have some front-end specific type
3596 that the backend doesn't know about. This can happen via the
3597 aggregate_value_p call in init_function_start. All we can do is
3598 ignore unknown tree types. */
3599 return VOIDmode;
3602 return VOIDmode;
3605 /* Return the number of words required to hold a quantity of TYPE and MODE
3606 when passed as an argument. */
3607 static int
3608 ia64_function_arg_words (tree type, enum machine_mode mode)
3610 int words;
3612 if (mode == BLKmode)
3613 words = int_size_in_bytes (type);
3614 else
3615 words = GET_MODE_SIZE (mode);
3617 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
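/* For instance (illustrative): a 12-byte BLKmode aggregate needs
   (12 + 7) / 8 == 2 argument words, while an SImode argument needs 1.  */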
3620 /* Return the number of registers that should be skipped so the current
3621 argument (described by TYPE and WORDS) will be properly aligned.
3623 Integer and float arguments larger than 8 bytes start at the next
3624 even boundary. Aggregates larger than 8 bytes start at the next
3625 even boundary if the aggregate has 16 byte alignment. Note that
3626 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3627 but are still to be aligned in registers.
3629 ??? The ABI does not specify how to handle aggregates with
3630 alignment from 9 to 15 bytes, or greater than 16. We handle them
3631 all as if they had 16 byte alignment. Such aggregates can occur
3632 only if gcc extensions are used. */
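/* Worked example (illustrative): if cum->words is odd, a DImode argument
   gets offset 0 (its word count is 1), while a TImode value or a
   16-byte-aligned aggregate gets offset 1 so that it starts on an even
   argument slot.  */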
3633 static int
3634 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3636 if ((cum->words & 1) == 0)
3637 return 0;
3639 if (type
3640 && TREE_CODE (type) != INTEGER_TYPE
3641 && TREE_CODE (type) != REAL_TYPE)
3642 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3643 else
3644 return words > 1;
3647 /* Return rtx for register where argument is passed, or zero if it is passed
3648 on the stack. */
3649 /* ??? 128-bit quad-precision floats are always passed in general
3650 registers. */
3652 rtx
3653 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3654 int named, int incoming)
3656 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3657 int words = ia64_function_arg_words (type, mode);
3658 int offset = ia64_function_arg_offset (cum, type, words);
3659 enum machine_mode hfa_mode = VOIDmode;
3661 /* If all argument slots are used, then it must go on the stack. */
3662 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3663 return 0;
3665 /* Check for and handle homogeneous FP aggregates. */
3666 if (type)
3667 hfa_mode = hfa_element_mode (type, 0);
3669 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3670 and unprototyped hfas are passed specially. */
3671 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3673 rtx loc[16];
3674 int i = 0;
3675 int fp_regs = cum->fp_regs;
3676 int int_regs = cum->words + offset;
3677 int hfa_size = GET_MODE_SIZE (hfa_mode);
3678 int byte_size;
3679 int args_byte_size;
3681 /* If prototyped, pass it in FR regs then GR regs.
3682 If not prototyped, pass it in both FR and GR regs.
3684 If this is an SFmode aggregate, then it is possible to run out of
3685 FR regs while GR regs are still left. In that case, we pass the
3686 remaining part in the GR regs. */
3688 /* Fill the FP regs. We do this always. We stop if we reach the end
3689 of the argument, the last FP register, or the last argument slot. */
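      /* Example (illustrative, assuming FR_ARG_FIRST is f8 as in the usual
	 IA-64 calling convention): a named, prototyped
	 "struct { float x, y, z; }" with fp_regs == 0 comes back as a
	 PARALLEL of (reg:SF f8) at offset 0, (reg:SF f9) at 4 and
	 (reg:SF f10) at 8.  */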
3691 byte_size = ((mode == BLKmode)
3692 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3693 args_byte_size = int_regs * UNITS_PER_WORD;
3694 offset = 0;
3695 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3696 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3698 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3699 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3700 + fp_regs)),
3701 GEN_INT (offset));
3702 offset += hfa_size;
3703 args_byte_size += hfa_size;
3704 fp_regs++;
3707 /* If no prototype, then the whole thing must go in GR regs. */
3708 if (! cum->prototype)
3709 offset = 0;
3710 /* If this is an SFmode aggregate, then we might have some left over
3711 that needs to go in GR regs. */
3712 else if (byte_size != offset)
3713 int_regs += offset / UNITS_PER_WORD;
3715 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3717 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3719 enum machine_mode gr_mode = DImode;
3720 unsigned int gr_size;
3722 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3723 then this goes in a GR reg left adjusted/little endian, right
3724 adjusted/big endian. */
3725 /* ??? Currently this is handled wrong, because 4-byte hunks are
3726 always right adjusted/little endian. */
3727 if (offset & 0x4)
3728 gr_mode = SImode;
3729 /* If we have an even 4 byte hunk because the aggregate is a
3730 multiple of 4 bytes in size, then this goes in a GR reg right
3731 adjusted/little endian. */
3732 else if (byte_size - offset == 4)
3733 gr_mode = SImode;
3735 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3736 gen_rtx_REG (gr_mode, (basereg
3737 + int_regs)),
3738 GEN_INT (offset));
3740 gr_size = GET_MODE_SIZE (gr_mode);
3741 offset += gr_size;
3742 if (gr_size == UNITS_PER_WORD
3743 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3744 int_regs++;
3745 else if (gr_size > UNITS_PER_WORD)
3746 int_regs += gr_size / UNITS_PER_WORD;
3749 /* If we ended up using just one location, just return that one loc, but
3750 change the mode back to the argument mode. */
3751 if (i == 1)
3752 return gen_rtx_REG (mode, REGNO (XEXP (loc[0], 0)));
3753 else
3754 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3757 /* Integral values and aggregates go in general registers. If we have run out of
3758 FR registers, then FP values must also go in general registers. This can
3759 happen when we have an SFmode HFA. */
3760 else if (mode == TFmode || mode == TCmode
3761 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3763 int byte_size = ((mode == BLKmode)
3764 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3765 if (BYTES_BIG_ENDIAN
3766 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3767 && byte_size < UNITS_PER_WORD
3768 && byte_size > 0)
3770 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3771 gen_rtx_REG (DImode,
3772 (basereg + cum->words
3773 + offset)),
3774 const0_rtx);
3775 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3777 else
3778 return gen_rtx_REG (mode, basereg + cum->words + offset);
3782 /* If there is a prototype, then FP values go in a FR register when
3783 named, and in a GR register when unnamed. */
3784 else if (cum->prototype)
3786 if (named)
3787 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3788 /* In big-endian mode, an anonymous SFmode value must be represented
3789 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3790 the value into the high half of the general register. */
3791 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3792 return gen_rtx_PARALLEL (mode,
3793 gen_rtvec (1,
3794 gen_rtx_EXPR_LIST (VOIDmode,
3795 gen_rtx_REG (DImode, basereg + cum->words + offset),
3796 const0_rtx)));
3797 else
3798 return gen_rtx_REG (mode, basereg + cum->words + offset);
3800 /* If there is no prototype, then FP values go in both FR and GR
3801 registers. */
3802 else
3804 /* See comment above. */
3805 enum machine_mode inner_mode =
3806 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3808 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3809 gen_rtx_REG (mode, (FR_ARG_FIRST
3810 + cum->fp_regs)),
3811 const0_rtx);
3812 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3813 gen_rtx_REG (inner_mode,
3814 (basereg + cum->words
3815 + offset)),
3816 const0_rtx);
3818 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
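/* Sketch of the unprototyped-FP case above: for a single SFmode
   argument the result has the shape
     (parallel:SF [(expr_list (reg:SF fN) (const_int 0))
                   (expr_list (reg:SF gM) (const_int 0))])
   (on big-endian targets the GR half uses DImode instead, as noted
   above), so the caller materializes the value in both an FP and a
   general argument register, because without a prototype it cannot
   know which one the callee will read.  Here fN and gM stand for the
   next free FP and GR argument registers; the exact numbers depend on
   CUM.  */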
3822 /* Return the number of words, at the beginning of the argument, that must be
3823 put in registers. 0 if the argument is entirely in registers or entirely
3824 in memory. */
3826 int
3827 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3828 tree type, int named ATTRIBUTE_UNUSED)
3830 int words = ia64_function_arg_words (type, mode);
3831 int offset = ia64_function_arg_offset (cum, type, words);
3833 /* If all argument slots are used, then it must go on the stack. */
3834 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3835 return 0;
3837 /* It doesn't matter whether the argument goes in FR or GR regs. If
3838 it fits within the 8 argument slots, then it goes entirely in
3839 registers. If it extends past the last argument slot, then the rest
3840 goes on the stack. */
3842 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3843 return 0;
3845 return MAX_ARGUMENT_SLOTS - cum->words - offset;
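/* Example: with MAX_ARGUMENT_SLOTS == 8, a 4-word argument whose first
   word would land in slot 6 returns 8 - 6 = 2, i.e. two words are
   passed in registers and the remaining two go on the stack.  */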
3848 /* Update CUM to point after this argument. This is patterned after
3849 ia64_function_arg. */
3851 void
3852 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3853 tree type, int named)
3855 int words = ia64_function_arg_words (type, mode);
3856 int offset = ia64_function_arg_offset (cum, type, words);
3857 enum machine_mode hfa_mode = VOIDmode;
3859 /* If all arg slots are already full, then there is nothing to do. */
3860 if (cum->words >= MAX_ARGUMENT_SLOTS)
3861 return;
3863 cum->words += words + offset;
3865 /* Check for and handle homogeneous FP aggregates. */
3866 if (type)
3867 hfa_mode = hfa_element_mode (type, 0);
3869 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3870 and unprototyped hfas are passed specially. */
3871 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3873 int fp_regs = cum->fp_regs;
3874 /* This is the original value of cum->words + offset. */
3875 int int_regs = cum->words - words;
3876 int hfa_size = GET_MODE_SIZE (hfa_mode);
3877 int byte_size;
3878 int args_byte_size;
3880 /* If prototyped, pass it in FR regs then GR regs.
3881 If not prototyped, pass it in both FR and GR regs.
3883 If this is an SFmode aggregate, then it is possible to run out of
3884 FR regs while GR regs are still left. In that case, we pass the
3885 remaining part in the GR regs. */
3887 /* Fill the FP regs. We do this always. We stop if we reach the end
3888 of the argument, the last FP register, or the last argument slot. */
3890 byte_size = ((mode == BLKmode)
3891 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3892 args_byte_size = int_regs * UNITS_PER_WORD;
3893 offset = 0;
3894 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3895 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3897 offset += hfa_size;
3898 args_byte_size += hfa_size;
3899 fp_regs++;
3902 cum->fp_regs = fp_regs;
3905 /* Integral values and aggregates go in general registers. If we have run out of
3906 FR registers, then FP values must also go in general registers. This can
3907 happen when we have an SFmode HFA. */
3908 else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
3909 cum->int_regs = cum->words;
3911 /* If there is a prototype, then FP values go in a FR register when
3912 named, and in a GR register when unnamed. */
3913 else if (cum->prototype)
3915 if (! named)
3916 cum->int_regs = cum->words;
3917 else
3918 /* ??? Complex types should not reach here. */
3919 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3921 /* If there is no prototype, then FP values go in both FR and GR
3922 registers. */
3923 else
3925 /* ??? Complex types should not reach here. */
3926 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3927 cum->int_regs = cum->words;
3931 /* Variable sized types are passed by reference. */
3932 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3934 int
3935 ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3936 enum machine_mode mode ATTRIBUTE_UNUSED,
3937 tree type, int named ATTRIBUTE_UNUSED)
3939 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3942 /* True if it is OK to do sibling call optimization for the specified
3943 call expression EXP. DECL will be the called function, or NULL if
3944 this is an indirect call. */
3945 static bool
3946 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3948 /* We must always return with our current GP. This means we can
3949 only sibcall to functions defined in the current module. */
3950 return decl && (*targetm.binds_local_p) (decl);
3954 /* Implement va_arg. */
3956 rtx
3957 ia64_va_arg (tree valist, tree type)
3959 tree t;
3961 /* Variable sized types are passed by reference. */
3962 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
3964 rtx addr = force_reg (ptr_mode,
3965 std_expand_builtin_va_arg (valist, build_pointer_type (type)));
3966 #ifdef POINTERS_EXTEND_UNSIGNED
3967 addr = convert_memory_address (Pmode, addr);
3968 #endif
3969 return gen_rtx_MEM (ptr_mode, addr);
3972 /* Aggregate arguments with alignment larger than 8 bytes start at
3973 the next even boundary. Integer and floating point arguments
3974 do so if they are larger than 8 bytes, whether or not they are
3975 also aligned to more than 8 bytes. */
3976 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3977 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3979 t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3980 build_int_2 (2 * UNITS_PER_WORD - 1, 0));
3981 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3982 build_int_2 (-2 * UNITS_PER_WORD, -1));
3983 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3984 TREE_SIDE_EFFECTS (t) = 1;
3985 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3988 return std_expand_builtin_va_arg (valist, type);
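/* The alignment fix-up above computes
     valist = (valist + 2 * UNITS_PER_WORD - 1) & -(2 * UNITS_PER_WORD);
   so with 8-byte words a va_list pointer of, say, 0x...08 is bumped to
   the next 16-byte boundary before the standard expansion runs.  */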
3991 /* Return 1 if the function return value is returned in memory. Return 0 if it is
3992 in a register. */
3994 static bool
3995 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3997 enum machine_mode mode;
3998 enum machine_mode hfa_mode;
3999 HOST_WIDE_INT byte_size;
4001 mode = TYPE_MODE (valtype);
4002 byte_size = GET_MODE_SIZE (mode);
4003 if (mode == BLKmode)
4005 byte_size = int_size_in_bytes (valtype);
4006 if (byte_size < 0)
4007 return true;
4010 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4012 hfa_mode = hfa_element_mode (valtype, 0);
4013 if (hfa_mode != VOIDmode)
4015 int hfa_size = GET_MODE_SIZE (hfa_mode);
4017 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4018 return true;
4019 else
4020 return false;
4022 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4023 return true;
4024 else
4025 return false;
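/* Examples: an HFA of eight doubles (8 elements, 64 bytes) fits the FP
   return registers; an HFA of ten doubles exceeds MAX_ARGUMENT_SLOTS
   and is returned in memory; a 40-byte non-HFA structure exceeds
   UNITS_PER_WORD * MAX_INT_RETURN_SLOTS bytes (32, assuming the usual
   four integer return registers) and is likewise returned in memory.  */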
4028 /* Return rtx for register that holds the function return value. */
4030 rtx
4031 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4033 enum machine_mode mode;
4034 enum machine_mode hfa_mode;
4036 mode = TYPE_MODE (valtype);
4037 hfa_mode = hfa_element_mode (valtype, 0);
4039 if (hfa_mode != VOIDmode)
4041 rtx loc[8];
4042 int i;
4043 int hfa_size;
4044 int byte_size;
4045 int offset;
4047 hfa_size = GET_MODE_SIZE (hfa_mode);
4048 byte_size = ((mode == BLKmode)
4049 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4050 offset = 0;
4051 for (i = 0; offset < byte_size; i++)
4053 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4054 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4055 GEN_INT (offset));
4056 offset += hfa_size;
4059 if (i == 1)
4060 return XEXP (loc[0], 0);
4061 else
4062 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4064 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4065 return gen_rtx_REG (mode, FR_ARG_FIRST);
4066 else
4068 if (BYTES_BIG_ENDIAN
4069 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4071 rtx loc[8];
4072 int offset;
4073 int bytesize;
4074 int i;
4076 offset = 0;
4077 bytesize = int_size_in_bytes (valtype);
4078 for (i = 0; offset < bytesize; i++)
4080 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4081 gen_rtx_REG (DImode,
4082 GR_RET_FIRST + i),
4083 GEN_INT (offset));
4084 offset += UNITS_PER_WORD;
4086 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4088 else
4089 return gen_rtx_REG (mode, GR_RET_FIRST);
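/* Example: a structure of two floats is an SFmode HFA and is returned
   as (parallel [(reg:SF f8) at offset 0, (reg:SF f9) at offset 4]);
   a plain double comes back in f8; a small integer structure comes
   back starting at GR_RET_FIRST (r8), or as the PARALLEL of DImode
   pieces above when the target is big-endian.  */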
4093 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
4094 We need to emit DTP-relative relocations. */
4096 void
4097 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4099 if (size != 8)
4100 abort ();
4101 fputs ("\tdata8.ua\t@dtprel(", file);
4102 output_addr_const (file, x);
4103 fputs (")", file);
4106 /* Print a memory address as an operand to reference that memory location. */
4108 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4109 also call this from ia64_print_operand for memory addresses. */
4111 void
4112 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4113 rtx address ATTRIBUTE_UNUSED)
4117 /* Print an operand to an assembler instruction.
4118 C Swap and print a comparison operator.
4119 D Print an FP comparison operator.
4120 E Print 32 - constant, for SImode shifts as extract.
4121 e Print 64 - constant, for DImode rotates.
4122 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4123 a floating point register emitted normally.
4124 I Invert a predicate register by adding 1.
4125 J Select the proper predicate register for a condition.
4126 j Select the inverse predicate register for a condition.
4127 O Append .acq for volatile load.
4128 P Postincrement of a MEM.
4129 Q Append .rel for volatile store.
4130 S Shift amount for shladd instruction.
4131 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4132 for Intel assembler.
4133 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4134 for Intel assembler.
4135 r Print register name, or constant 0 as r0. HP compatibility for
4136 Linux kernel. */
4137 void
4138 ia64_print_operand (FILE * file, rtx x, int code)
4140 const char *str;
4142 switch (code)
4144 case 0:
4145 /* Handled below. */
4146 break;
4148 case 'C':
4150 enum rtx_code c = swap_condition (GET_CODE (x));
4151 fputs (GET_RTX_NAME (c), file);
4152 return;
4155 case 'D':
4156 switch (GET_CODE (x))
4158 case NE:
4159 str = "neq";
4160 break;
4161 case UNORDERED:
4162 str = "unord";
4163 break;
4164 case ORDERED:
4165 str = "ord";
4166 break;
4167 default:
4168 str = GET_RTX_NAME (GET_CODE (x));
4169 break;
4171 fputs (str, file);
4172 return;
4174 case 'E':
4175 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4176 return;
4178 case 'e':
4179 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4180 return;
4182 case 'F':
4183 if (x == CONST0_RTX (GET_MODE (x)))
4184 str = reg_names [FR_REG (0)];
4185 else if (x == CONST1_RTX (GET_MODE (x)))
4186 str = reg_names [FR_REG (1)];
4187 else if (GET_CODE (x) == REG)
4188 str = reg_names [REGNO (x)];
4189 else
4190 abort ();
4191 fputs (str, file);
4192 return;
4194 case 'I':
4195 fputs (reg_names [REGNO (x) + 1], file);
4196 return;
4198 case 'J':
4199 case 'j':
4201 unsigned int regno = REGNO (XEXP (x, 0));
4202 if (GET_CODE (x) == EQ)
4203 regno += 1;
4204 if (code == 'j')
4205 regno ^= 1;
4206 fputs (reg_names [regno], file);
4208 return;
4210 case 'O':
4211 if (MEM_VOLATILE_P (x))
4212 fputs(".acq", file);
4213 return;
4215 case 'P':
4217 HOST_WIDE_INT value;
4219 switch (GET_CODE (XEXP (x, 0)))
4221 default:
4222 return;
4224 case POST_MODIFY:
4225 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4226 if (GET_CODE (x) == CONST_INT)
4227 value = INTVAL (x);
4228 else if (GET_CODE (x) == REG)
4230 fprintf (file, ", %s", reg_names[REGNO (x)]);
4231 return;
4233 else
4234 abort ();
4235 break;
4237 case POST_INC:
4238 value = GET_MODE_SIZE (GET_MODE (x));
4239 break;
4241 case POST_DEC:
4242 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4243 break;
4246 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4247 return;
4250 case 'Q':
4251 if (MEM_VOLATILE_P (x))
4252 fputs(".rel", file);
4253 return;
4255 case 'S':
4256 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4257 return;
4259 case 'T':
4260 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4262 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4263 return;
4265 break;
4267 case 'U':
4268 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4270 const char *prefix = "0x";
4271 if (INTVAL (x) & 0x80000000)
4273 fprintf (file, "0xffffffff");
4274 prefix = "";
4276 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4277 return;
4279 break;
4281 case 'r':
4282 /* If this operand is the constant zero, write it as register zero.
4283 Any register, zero, or CONST_INT value is OK here. */
4284 if (GET_CODE (x) == REG)
4285 fputs (reg_names[REGNO (x)], file);
4286 else if (x == CONST0_RTX (GET_MODE (x)))
4287 fputs ("r0", file);
4288 else if (GET_CODE (x) == CONST_INT)
4289 output_addr_const (file, x);
4290 else
4291 output_operand_lossage ("invalid %%r value");
4292 return;
4294 case '+':
4296 const char *which;
4298 /* For conditional branches, returns or calls, substitute
4299 sptk, dptk, dpnt, or spnt for %s. */
4300 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4301 if (x)
4303 int pred_val = INTVAL (XEXP (x, 0));
4305 /* Guess: treat the top and bottom 2% as statically predicted. */
4306 if (pred_val < REG_BR_PROB_BASE / 50)
4307 which = ".spnt";
4308 else if (pred_val < REG_BR_PROB_BASE / 2)
4309 which = ".dpnt";
4310 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4311 which = ".dptk";
4312 else
4313 which = ".sptk";
4315 else if (GET_CODE (current_output_insn) == CALL_INSN)
4316 which = ".sptk";
4317 else
4318 which = ".dptk";
4320 fputs (which, file);
4321 return;
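/* For instance, with REG_BR_PROB_BASE == 10000 a REG_BR_PROB note of
   150 yields ".spnt", 4000 yields ".dpnt", 9000 yields ".dptk" and
   9900 yields ".sptk".  */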
4324 case ',':
4325 x = current_insn_predicate;
4326 if (x)
4328 unsigned int regno = REGNO (XEXP (x, 0));
4329 if (GET_CODE (x) == EQ)
4330 regno += 1;
4331 fprintf (file, "(%s) ", reg_names [regno]);
4333 return;
4335 default:
4336 output_operand_lossage ("ia64_print_operand: unknown code");
4337 return;
4340 switch (GET_CODE (x))
4342 /* This happens for the spill/restore instructions. */
4343 case POST_INC:
4344 case POST_DEC:
4345 case POST_MODIFY:
4346 x = XEXP (x, 0);
4347 /* ... fall through ... */
4349 case REG:
4350 fputs (reg_names [REGNO (x)], file);
4351 break;
4353 case MEM:
4355 rtx addr = XEXP (x, 0);
4356 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4357 addr = XEXP (addr, 0);
4358 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4359 break;
4362 default:
4363 output_addr_const (file, x);
4364 break;
4367 return;
4370 /* Compute a (partial) cost for rtx X. Return true if the complete
4371 cost has been computed, and false if subexpressions should be
4372 scanned. In either case, *TOTAL contains the cost result. */
4373 /* ??? This is incomplete. */
4375 static bool
4376 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4378 switch (code)
4380 case CONST_INT:
4381 switch (outer_code)
4383 case SET:
4384 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4385 return true;
4386 case PLUS:
4387 if (CONST_OK_FOR_I (INTVAL (x)))
4388 *total = 0;
4389 else if (CONST_OK_FOR_J (INTVAL (x)))
4390 *total = 1;
4391 else
4392 *total = COSTS_N_INSNS (1);
4393 return true;
4394 default:
4395 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4396 *total = 0;
4397 else
4398 *total = COSTS_N_INSNS (1);
4399 return true;
4402 case CONST_DOUBLE:
4403 *total = COSTS_N_INSNS (1);
4404 return true;
4406 case CONST:
4407 case SYMBOL_REF:
4408 case LABEL_REF:
4409 *total = COSTS_N_INSNS (3);
4410 return true;
4412 case MULT:
4413 /* For multiplies wider than HImode, we have to go to the FPU,
4414 which normally involves copies. Plus there's the latency
4415 of the multiply itself, and the latency of the instructions to
4416 transfer integer regs to FP regs. */
4417 /* ??? Check for FP mode. */
4418 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4419 *total = COSTS_N_INSNS (10);
4420 else
4421 *total = COSTS_N_INSNS (2);
4422 return true;
4424 case PLUS:
4425 case MINUS:
4426 case ASHIFT:
4427 case ASHIFTRT:
4428 case LSHIFTRT:
4429 *total = COSTS_N_INSNS (1);
4430 return true;
4432 case DIV:
4433 case UDIV:
4434 case MOD:
4435 case UMOD:
4436 /* We make divide expensive, so that divide-by-constant will be
4437 optimized to a multiply. */
4438 *total = COSTS_N_INSNS (60);
4439 return true;
4441 default:
4442 return false;
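/* For example, under the SET case a small constant accepted by
   CONST_OK_FOR_J is free because it fits an immediate move, while a
   wider constant that needs a separate long-immediate move is charged
   COSTS_N_INSNS (1); all divisions are priced at 60 insns so that
   division by a constant is turned into a multiply instead.  */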
4446 /* Calculate the cost of moving data from a register in class FROM to
4447 one in class TO, using MODE. */
4449 int
4450 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4451 enum reg_class to)
4453 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4454 if (to == ADDL_REGS)
4455 to = GR_REGS;
4456 if (from == ADDL_REGS)
4457 from = GR_REGS;
4459 /* All costs are symmetric, so reduce cases by putting the
4460 lower number class as the destination. */
4461 if (from < to)
4463 enum reg_class tmp = to;
4464 to = from, from = tmp;
4467 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4468 so that we get secondary memory reloads. Between FR_REGS,
4469 we have to make this at least as expensive as MEMORY_MOVE_COST
4470 to avoid spectacularly poor register class preferencing. */
4471 if (mode == XFmode)
4473 if (to != GR_REGS || from != GR_REGS)
4474 return MEMORY_MOVE_COST (mode, to, 0);
4475 else
4476 return 3;
4479 switch (to)
4481 case PR_REGS:
4482 /* Moving between PR registers takes two insns. */
4483 if (from == PR_REGS)
4484 return 3;
4485 /* Moving between PR and anything but GR is impossible. */
4486 if (from != GR_REGS)
4487 return MEMORY_MOVE_COST (mode, to, 0);
4488 break;
4490 case BR_REGS:
4491 /* Moving between BR and anything but GR is impossible. */
4492 if (from != GR_REGS && from != GR_AND_BR_REGS)
4493 return MEMORY_MOVE_COST (mode, to, 0);
4494 break;
4496 case AR_I_REGS:
4497 case AR_M_REGS:
4498 /* Moving between AR and anything but GR is impossible. */
4499 if (from != GR_REGS)
4500 return MEMORY_MOVE_COST (mode, to, 0);
4501 break;
4503 case GR_REGS:
4504 case FR_REGS:
4505 case GR_AND_FR_REGS:
4506 case GR_AND_BR_REGS:
4507 case ALL_REGS:
4508 break;
4510 default:
4511 abort ();
4514 return 2;
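/* Net effect of the cases above: most legal register-to-register moves
   cost 2, PR<->PR moves cost 3, XFmode moves other than GR<->GR are
   priced like memory (forcing secondary memory reloads), and so are
   the PR/BR/AR combinations that cannot be done directly.  */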
4517 /* This function returns the register class required for a secondary
4518 register when copying between one of the registers in CLASS, and X,
4519 using MODE. A return value of NO_REGS means that no secondary register
4520 is required. */
4522 enum reg_class
4523 ia64_secondary_reload_class (enum reg_class class,
4524 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4526 int regno = -1;
4528 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4529 regno = true_regnum (x);
4531 switch (class)
4533 case BR_REGS:
4534 case AR_M_REGS:
4535 case AR_I_REGS:
4536 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4537 interaction. We end up with two pseudos with overlapping lifetimes
4538 both of which are equivalent to the same constant, and both of which need
4539 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4540 changes depending on the path length, which means the qty_first_reg
4541 check in make_regs_eqv can give different answers at different times.
4542 At some point I'll probably need a reload_indi pattern to handle
4543 this.
4545 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4546 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4547 non-general registers for good measure. */
4548 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4549 return GR_REGS;
4551 /* This is needed if a pseudo used as a call_operand gets spilled to a
4552 stack slot. */
4553 if (GET_CODE (x) == MEM)
4554 return GR_REGS;
4555 break;
4557 case FR_REGS:
4558 /* Need to go through general registers to get to other class regs. */
4559 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4560 return GR_REGS;
4562 /* This can happen when a paradoxical subreg is an operand to the
4563 muldi3 pattern. */
4564 /* ??? This shouldn't be necessary after instruction scheduling is
4565 enabled, because paradoxical subregs are not accepted by
4566 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4567 stop the paradoxical subreg stupidity in the *_operand functions
4568 in recog.c. */
4569 if (GET_CODE (x) == MEM
4570 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4571 || GET_MODE (x) == QImode))
4572 return GR_REGS;
4574 /* This can happen because of the ior/and/etc patterns that accept FP
4575 registers as operands. If the third operand is a constant, then it
4576 needs to be reloaded into a FP register. */
4577 if (GET_CODE (x) == CONST_INT)
4578 return GR_REGS;
4580 /* This can happen because of register elimination in a muldi3 insn.
4581 E.g. `26107 * (unsigned long)&u'. */
4582 if (GET_CODE (x) == PLUS)
4583 return GR_REGS;
4584 break;
4586 case PR_REGS:
4587 /* ??? This happens if we cse/gcse a BImode value across a call,
4588 and the function has a nonlocal goto. This is because global
4589 does not allocate call crossing pseudos to hard registers when
4590 current_function_has_nonlocal_goto is true. This is relatively
4591 common for C++ programs that use exceptions. To reproduce,
4592 return NO_REGS and compile libstdc++. */
4593 if (GET_CODE (x) == MEM)
4594 return GR_REGS;
4596 /* This can happen when we take a BImode subreg of a DImode value,
4597 and that DImode value winds up in some non-GR register. */
4598 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4599 return GR_REGS;
4600 break;
4602 default:
4603 break;
4606 return NO_REGS;
4610 /* Emit text to declare externally defined variables and functions, because
4611 the Intel assembler does not support undefined externals. */
4613 void
4614 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4616 int save_referenced;
4618 /* GNU as does not need anything here, but the HP linker does need
4619 something for external functions. */
4621 if (TARGET_GNU_AS
4622 && (!TARGET_HPUX_LD
4623 || TREE_CODE (decl) != FUNCTION_DECL
4624 || strstr (name, "__builtin_") == name))
4625 return;
4627 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4628 the linker when we do this, so we need to be careful not to do this for
4629 builtin functions which have no library equivalent. Unfortunately, we
4630 can't tell here whether or not a function will actually be called by
4631 expand_expr, so we pull in library functions even if we may not need
4632 them later. */
4633 if (! strcmp (name, "__builtin_next_arg")
4634 || ! strcmp (name, "alloca")
4635 || ! strcmp (name, "__builtin_constant_p")
4636 || ! strcmp (name, "__builtin_args_info"))
4637 return;
4639 if (TARGET_HPUX_LD)
4640 ia64_hpux_add_extern_decl (decl);
4641 else
4643 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4644 restore it. */
4645 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4646 if (TREE_CODE (decl) == FUNCTION_DECL)
4647 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4648 (*targetm.asm_out.globalize_label) (file, name);
4649 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4653 /* Parse the -mfixed-range= option string. */
4655 static void
4656 fix_range (const char *const_str)
4658 int i, first, last;
4659 char *str, *dash, *comma;
4661 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4662 REG2 are either register names or register numbers. The effect
4663 of this option is to mark the registers in the range from REG1 to
4664 REG2 as ``fixed'' so they won't be used by the compiler. This is
4665 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
4667 i = strlen (const_str);
4668 str = (char *) alloca (i + 1);
4669 memcpy (str, const_str, i + 1);
4671 while (1)
4673 dash = strchr (str, '-');
4674 if (!dash)
4676 warning ("value of -mfixed-range must have form REG1-REG2");
4677 return;
4679 *dash = '\0';
4681 comma = strchr (dash + 1, ',');
4682 if (comma)
4683 *comma = '\0';
4685 first = decode_reg_name (str);
4686 if (first < 0)
4688 warning ("unknown register name: %s", str);
4689 return;
4692 last = decode_reg_name (dash + 1);
4693 if (last < 0)
4695 warning ("unknown register name: %s", dash + 1);
4696 return;
4699 *dash = '-';
4701 if (first > last)
4703 warning ("%s-%s is an empty range", str, dash + 1);
4704 return;
4707 for (i = first; i <= last; ++i)
4708 fixed_regs[i] = call_used_regs[i] = 1;
4710 if (!comma)
4711 break;
4713 *comma = ',';
4714 str = comma + 1;
4718 static struct machine_function *
4719 ia64_init_machine_status (void)
4721 return ggc_alloc_cleared (sizeof (struct machine_function));
4724 /* Handle TARGET_OPTIONS switches. */
4726 void
4727 ia64_override_options (void)
4729 static struct pta
4731 const char *const name; /* processor name or nickname. */
4732 const enum processor_type processor;
4734 const processor_alias_table[] =
4736 {"itanium", PROCESSOR_ITANIUM},
4737 {"itanium1", PROCESSOR_ITANIUM},
4738 {"merced", PROCESSOR_ITANIUM},
4739 {"itanium2", PROCESSOR_ITANIUM2},
4740 {"mckinley", PROCESSOR_ITANIUM2},
4743 int const pta_size = ARRAY_SIZE (processor_alias_table);
4744 int i;
4746 if (TARGET_AUTO_PIC)
4747 target_flags |= MASK_CONST_GP;
4749 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4751 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4752 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4754 warning ("cannot optimize floating point division for both latency and throughput");
4755 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4757 else
4759 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4760 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4761 else
4762 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4766 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4768 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4769 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4771 warning ("cannot optimize integer division for both latency and throughput");
4772 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4774 else
4776 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4777 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4778 else
4779 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4783 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4785 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4786 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4788 warning ("cannot optimize square root for both latency and throughput");
4789 target_flags &= ~MASK_INLINE_SQRT_THR;
4791 else
4793 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4794 target_flags &= ~MASK_INLINE_SQRT_LAT;
4795 else
4796 target_flags &= ~MASK_INLINE_SQRT_THR;
4800 if (TARGET_INLINE_SQRT_LAT)
4802 warning ("not yet implemented: latency-optimized inline square root");
4803 target_flags &= ~MASK_INLINE_SQRT_LAT;
4806 if (ia64_fixed_range_string)
4807 fix_range (ia64_fixed_range_string);
4809 if (ia64_tls_size_string)
4811 char *end;
4812 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4813 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4814 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4815 else
4816 ia64_tls_size = tmp;
4819 if (!ia64_tune_string)
4820 ia64_tune_string = "itanium2";
4822 for (i = 0; i < pta_size; i++)
4823 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4825 ia64_tune = processor_alias_table[i].processor;
4826 break;
4829 if (i == pta_size)
4830 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4832 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4833 flag_schedule_insns_after_reload = 0;
4835 /* Variable tracking should be run after all optimizations which change order
4836 of insns. It also needs a valid CFG. */
4837 ia64_flag_var_tracking = flag_var_tracking;
4838 flag_var_tracking = 0;
4840 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4842 init_machine_status = ia64_init_machine_status;
4845 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4846 static enum attr_type ia64_safe_type (rtx);
4848 static enum attr_itanium_class
4849 ia64_safe_itanium_class (rtx insn)
4851 if (recog_memoized (insn) >= 0)
4852 return get_attr_itanium_class (insn);
4853 else
4854 return ITANIUM_CLASS_UNKNOWN;
4857 static enum attr_type
4858 ia64_safe_type (rtx insn)
4860 if (recog_memoized (insn) >= 0)
4861 return get_attr_type (insn);
4862 else
4863 return TYPE_UNKNOWN;
4866 /* The following collection of routines emit instruction group stop bits as
4867 necessary to avoid dependencies. */
4869 /* Need to track some additional registers as far as serialization is
4870 concerned so we can properly handle br.call and br.ret. We could
4871 make these registers visible to gcc, but since these registers are
4872 never explicitly used in gcc generated code, it seems wasteful to
4873 do so (plus it would make the call and return patterns needlessly
4874 complex). */
4875 #define REG_RP (BR_REG (0))
4876 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4877 /* This is used for volatile asms which may require a stop bit immediately
4878 before and after them. */
4879 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4880 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4881 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4883 /* For each register, we keep track of how it has been written in the
4884 current instruction group.
4886 If a register is written unconditionally (no qualifying predicate),
4887 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4889 If a register is written if its qualifying predicate P is true, we
4890 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4891 may be written again by the complement of P (P^1) and when this happens,
4892 WRITE_COUNT gets set to 2.
4894 The result of this is that whenever an insn attempts to write a register
4895 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4897 If a predicate register is written by a floating-point insn, we set
4898 WRITTEN_BY_FP to true.
4900 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4901 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
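/* Example of the protocol just described: after "(p6) mov r14 = r15"
   we record write_count == 1 with first_pred == p6; a later
   "(p7) mov r14 = r16" in the same group (p7 being p6's complement)
   raises write_count to 2, and any further write of r14 before a stop
   bit then forces a group barrier.  */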
4903 struct reg_write_state
4905 unsigned int write_count : 2;
4906 unsigned int first_pred : 16;
4907 unsigned int written_by_fp : 1;
4908 unsigned int written_by_and : 1;
4909 unsigned int written_by_or : 1;
4912 /* Cumulative info for the current instruction group. */
4913 struct reg_write_state rws_sum[NUM_REGS];
4914 /* Info for the current instruction. This gets copied to rws_sum after a
4915 stop bit is emitted. */
4916 struct reg_write_state rws_insn[NUM_REGS];
4918 /* Indicates whether this is the first instruction after a stop bit,
4919 in which case we don't need another stop bit. Without this, we hit
4920 the abort in ia64_variable_issue when scheduling an alloc. */
4921 static int first_instruction;
4923 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4924 RTL for one instruction. */
4925 struct reg_flags
4927 unsigned int is_write : 1; /* Is register being written? */
4928 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4929 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4930 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4931 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4932 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4935 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4936 static int rws_access_regno (int, struct reg_flags, int);
4937 static int rws_access_reg (rtx, struct reg_flags, int);
4938 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4939 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4940 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4941 static void init_insn_group_barriers (void);
4942 static int group_barrier_needed_p (rtx);
4943 static int safe_group_barrier_needed_p (rtx);
4945 /* Update *RWS for REGNO, which is being written by the current instruction,
4946 with predicate PRED, and associated register flags in FLAGS. */
4948 static void
4949 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4951 if (pred)
4952 rws[regno].write_count++;
4953 else
4954 rws[regno].write_count = 2;
4955 rws[regno].written_by_fp |= flags.is_fp;
4956 /* ??? Not tracking and/or across differing predicates. */
4957 rws[regno].written_by_and = flags.is_and;
4958 rws[regno].written_by_or = flags.is_or;
4959 rws[regno].first_pred = pred;
4962 /* Handle an access to register REGNO of type FLAGS using predicate register
4963 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4964 a dependency with an earlier instruction in the same group. */
4966 static int
4967 rws_access_regno (int regno, struct reg_flags flags, int pred)
4969 int need_barrier = 0;
4971 if (regno >= NUM_REGS)
4972 abort ();
4974 if (! PR_REGNO_P (regno))
4975 flags.is_and = flags.is_or = 0;
4977 if (flags.is_write)
4979 int write_count;
4981 /* Does one insn write the same reg multiple times? */
4982 if (rws_insn[regno].write_count > 0)
4983 abort ();
4985 /* Update info for current instruction. */
4986 rws_update (rws_insn, regno, flags, pred);
4987 write_count = rws_sum[regno].write_count;
4989 switch (write_count)
4991 case 0:
4992 /* The register has not been written yet. */
4993 rws_update (rws_sum, regno, flags, pred);
4994 break;
4996 case 1:
4997 /* The register has been written via a predicate. If this is
4998 not a complementary predicate, then we need a barrier. */
4999 /* ??? This assumes that P and P+1 are always complementary
5000 predicates for P even. */
5001 if (flags.is_and && rws_sum[regno].written_by_and)
5003 else if (flags.is_or && rws_sum[regno].written_by_or)
5005 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5006 need_barrier = 1;
5007 rws_update (rws_sum, regno, flags, pred);
5008 break;
5010 case 2:
5011 /* The register has been unconditionally written already. We
5012 need a barrier. */
5013 if (flags.is_and && rws_sum[regno].written_by_and)
5015 else if (flags.is_or && rws_sum[regno].written_by_or)
5017 else
5018 need_barrier = 1;
5019 rws_sum[regno].written_by_and = flags.is_and;
5020 rws_sum[regno].written_by_or = flags.is_or;
5021 break;
5023 default:
5024 abort ();
5027 else
5029 if (flags.is_branch)
5031 /* Branches have several RAW exceptions that allow us to avoid
5032 barriers. */
5034 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5035 /* RAW dependencies on branch regs are permissible as long
5036 as the writer is a non-branch instruction. Since we
5037 never generate code that uses a branch register written
5038 by a branch instruction, handling this case is
5039 easy. */
5040 return 0;
5042 if (REGNO_REG_CLASS (regno) == PR_REGS
5043 && ! rws_sum[regno].written_by_fp)
5044 /* The predicates of a branch are available within the
5045 same insn group as long as the predicate was written by
5046 something other than a floating-point instruction. */
5047 return 0;
5050 if (flags.is_and && rws_sum[regno].written_by_and)
5051 return 0;
5052 if (flags.is_or && rws_sum[regno].written_by_or)
5053 return 0;
5055 switch (rws_sum[regno].write_count)
5057 case 0:
5058 /* The register has not been written yet. */
5059 break;
5061 case 1:
5062 /* The register has been written via a predicate. If this is
5063 not a complementary predicate, then we need a barrier. */
5064 /* ??? This assumes that P and P+1 are always complementary
5065 predicates for P even. */
5066 if ((rws_sum[regno].first_pred ^ 1) != pred)
5067 need_barrier = 1;
5068 break;
5070 case 2:
5071 /* The register has been unconditionally written already. We
5072 need a barrier. */
5073 need_barrier = 1;
5074 break;
5076 default:
5077 abort ();
5081 return need_barrier;
5084 static int
5085 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5087 int regno = REGNO (reg);
5088 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5090 if (n == 1)
5091 return rws_access_regno (regno, flags, pred);
5092 else
5094 int need_barrier = 0;
5095 while (--n >= 0)
5096 need_barrier |= rws_access_regno (regno + n, flags, pred);
5097 return need_barrier;
5101 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
5102 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
5104 static void
5105 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
5107 rtx src = SET_SRC (x);
5109 *pcond = 0;
5111 switch (GET_CODE (src))
5113 case CALL:
5114 return;
5116 case IF_THEN_ELSE:
5117 if (SET_DEST (x) == pc_rtx)
5118 /* X is a conditional branch. */
5119 return;
5120 else
5122 int is_complemented = 0;
5124 /* X is a conditional move. */
5125 rtx cond = XEXP (src, 0);
5126 if (GET_CODE (cond) == EQ)
5127 is_complemented = 1;
5128 cond = XEXP (cond, 0);
5129 if (GET_CODE (cond) != REG
5130 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5131 abort ();
5132 *pcond = cond;
5133 if (XEXP (src, 1) == SET_DEST (x)
5134 || XEXP (src, 2) == SET_DEST (x))
5136 /* X is a conditional move that conditionally writes the
5137 destination. */
5139 /* We need another complement in this case. */
5140 if (XEXP (src, 1) == SET_DEST (x))
5141 is_complemented = ! is_complemented;
5143 *ppred = REGNO (cond);
5144 if (is_complemented)
5145 ++*ppred;
5148 /* ??? If this is a conditional write to the dest, then this
5149 instruction does not actually read one source. This probably
5150 doesn't matter, because that source is also the dest. */
5151 /* ??? Multiple writes to predicate registers are allowed
5152 if they are all AND type compares, or if they are all OR
5153 type compares. We do not generate such instructions
5154 currently. */
5156 /* ... fall through ... */
5158 default:
5159 if (COMPARISON_P (src)
5160 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
5161 /* Set pflags->is_fp to 1 so that we know we're dealing
5162 with a floating point comparison when processing the
5163 destination of the SET. */
5164 pflags->is_fp = 1;
5166 /* Discover if this is a parallel comparison. We only handle
5167 and.orcm and or.andcm at present, since we must retain a
5168 strict inverse on the predicate pair. */
5169 else if (GET_CODE (src) == AND)
5170 pflags->is_and = 1;
5171 else if (GET_CODE (src) == IOR)
5172 pflags->is_or = 1;
5174 break;
5178 /* Subroutine of rtx_needs_barrier; this function determines whether the
5179 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5180 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5181 for this insn. */
5183 static int
5184 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
5186 int need_barrier = 0;
5187 rtx dst;
5188 rtx src = SET_SRC (x);
5190 if (GET_CODE (src) == CALL)
5191 /* We don't need to worry about the result registers that
5192 get written by subroutine call. */
5193 return rtx_needs_barrier (src, flags, pred);
5194 else if (SET_DEST (x) == pc_rtx)
5196 /* X is a conditional branch. */
5197 /* ??? This seems redundant, as the caller sets this bit for
5198 all JUMP_INSNs. */
5199 flags.is_branch = 1;
5200 return rtx_needs_barrier (src, flags, pred);
5203 need_barrier = rtx_needs_barrier (src, flags, pred);
5205 /* This instruction unconditionally uses a predicate register. */
5206 if (cond)
5207 need_barrier |= rws_access_reg (cond, flags, 0);
5209 dst = SET_DEST (x);
5210 if (GET_CODE (dst) == ZERO_EXTRACT)
5212 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5213 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5214 dst = XEXP (dst, 0);
5216 return need_barrier;
5219 /* Handle an access to rtx X of type FLAGS using predicate register
5220 PRED. Return 1 if this access creates a dependency with an earlier
5221 instruction in the same group. */
5223 static int
5224 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5226 int i, j;
5227 int is_complemented = 0;
5228 int need_barrier = 0;
5229 const char *format_ptr;
5230 struct reg_flags new_flags;
5231 rtx cond = 0;
5233 if (! x)
5234 return 0;
5236 new_flags = flags;
5238 switch (GET_CODE (x))
5240 case SET:
5241 update_set_flags (x, &new_flags, &pred, &cond);
5242 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
5243 if (GET_CODE (SET_SRC (x)) != CALL)
5245 new_flags.is_write = 1;
5246 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5248 break;
5250 case CALL:
5251 new_flags.is_write = 0;
5252 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5254 /* Avoid multiple register writes, in case this is a pattern with
5255 multiple CALL rtx. This avoids an abort in rws_access_reg. */
5256 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5258 new_flags.is_write = 1;
5259 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5260 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5261 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5263 break;
5265 case COND_EXEC:
5266 /* X is a predicated instruction. */
5268 cond = COND_EXEC_TEST (x);
5269 if (pred)
5270 abort ();
5271 need_barrier = rtx_needs_barrier (cond, flags, 0);
5273 if (GET_CODE (cond) == EQ)
5274 is_complemented = 1;
5275 cond = XEXP (cond, 0);
5276 if (GET_CODE (cond) != REG
5277 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5278 abort ();
5279 pred = REGNO (cond);
5280 if (is_complemented)
5281 ++pred;
5283 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5284 return need_barrier;
5286 case CLOBBER:
5287 case USE:
5288 /* Clobber & use are for earlier compiler-phases only. */
5289 break;
5291 case ASM_OPERANDS:
5292 case ASM_INPUT:
5293 /* We always emit stop bits for traditional asms. We emit stop bits
5294 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5295 if (GET_CODE (x) != ASM_OPERANDS
5296 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5298 /* Avoid writing the register multiple times if we have multiple
5299 asm outputs. This avoids an abort in rws_access_reg. */
5300 if (! rws_insn[REG_VOLATILE].write_count)
5302 new_flags.is_write = 1;
5303 rws_access_regno (REG_VOLATILE, new_flags, pred);
5305 return 1;
5308 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5309 We cannot just fall through here, since then we would be confused
5310 by the ASM_INPUT rtxs inside ASM_OPERANDS, which do not indicate
5311 traditional asms, unlike their normal usage. */
5313 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5314 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5315 need_barrier = 1;
5316 break;
5318 case PARALLEL:
5319 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5321 rtx pat = XVECEXP (x, 0, i);
5322 if (GET_CODE (pat) == SET)
5324 update_set_flags (pat, &new_flags, &pred, &cond);
5325 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
5327 else if (GET_CODE (pat) == USE
5328 || GET_CODE (pat) == CALL
5329 || GET_CODE (pat) == ASM_OPERANDS)
5330 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5331 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
5332 abort ();
5334 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5336 rtx pat = XVECEXP (x, 0, i);
5337 if (GET_CODE (pat) == SET)
5339 if (GET_CODE (SET_SRC (pat)) != CALL)
5341 new_flags.is_write = 1;
5342 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5343 pred);
5346 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5347 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5349 break;
5351 case SUBREG:
5352 x = SUBREG_REG (x);
5353 /* FALLTHRU */
5354 case REG:
5355 if (REGNO (x) == AR_UNAT_REGNUM)
5357 for (i = 0; i < 64; ++i)
5358 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5360 else
5361 need_barrier = rws_access_reg (x, flags, pred);
5362 break;
5364 case MEM:
5365 /* Find the regs used in memory address computation. */
5366 new_flags.is_write = 0;
5367 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5368 break;
5370 case CONST_INT: case CONST_DOUBLE:
5371 case SYMBOL_REF: case LABEL_REF: case CONST:
5372 break;
5374 /* Operators with side-effects. */
5375 case POST_INC: case POST_DEC:
5376 if (GET_CODE (XEXP (x, 0)) != REG)
5377 abort ();
5379 new_flags.is_write = 0;
5380 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5381 new_flags.is_write = 1;
5382 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5383 break;
5385 case POST_MODIFY:
5386 if (GET_CODE (XEXP (x, 0)) != REG)
5387 abort ();
5389 new_flags.is_write = 0;
5390 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5391 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5392 new_flags.is_write = 1;
5393 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5394 break;
5396 /* Handle common unary and binary ops for efficiency. */
5397 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5398 case MOD: case UDIV: case UMOD: case AND: case IOR:
5399 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5400 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5401 case NE: case EQ: case GE: case GT: case LE:
5402 case LT: case GEU: case GTU: case LEU: case LTU:
5403 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5404 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5405 break;
5407 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5408 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5409 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5410 case SQRT: case FFS: case POPCOUNT:
5411 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5412 break;
5414 case UNSPEC:
5415 switch (XINT (x, 1))
5417 case UNSPEC_LTOFF_DTPMOD:
5418 case UNSPEC_LTOFF_DTPREL:
5419 case UNSPEC_DTPREL:
5420 case UNSPEC_LTOFF_TPREL:
5421 case UNSPEC_TPREL:
5422 case UNSPEC_PRED_REL_MUTEX:
5423 case UNSPEC_PIC_CALL:
5424 case UNSPEC_MF:
5425 case UNSPEC_FETCHADD_ACQ:
5426 case UNSPEC_BSP_VALUE:
5427 case UNSPEC_FLUSHRS:
5428 case UNSPEC_BUNDLE_SELECTOR:
5429 break;
5431 case UNSPEC_GR_SPILL:
5432 case UNSPEC_GR_RESTORE:
5434 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5435 HOST_WIDE_INT bit = (offset >> 3) & 63;
5437 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5438 new_flags.is_write = (XINT (x, 1) == 1);
5439 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5440 new_flags, pred);
5441 break;
5444 case UNSPEC_FR_SPILL:
5445 case UNSPEC_FR_RESTORE:
5446 case UNSPEC_GETF_EXP:
5447 case UNSPEC_SETF_EXP:
5448 case UNSPEC_ADDP4:
5449 case UNSPEC_FR_SQRT_RECIP_APPROX:
5450 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5451 break;
5453 case UNSPEC_FR_RECIP_APPROX:
5454 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5455 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5456 break;
5458 case UNSPEC_CMPXCHG_ACQ:
5459 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5460 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5461 break;
5463 default:
5464 abort ();
5466 break;
5468 case UNSPEC_VOLATILE:
5469 switch (XINT (x, 1))
5471 case UNSPECV_ALLOC:
5472 /* Alloc must always be the first instruction of a group.
5473 We force this by always returning true. */
5474 /* ??? We might get better scheduling if we explicitly check for
5475 input/local/output register dependencies, and modify the
5476 scheduler so that alloc is always reordered to the start of
5477 the current group. We could then eliminate all of the
5478 first_instruction code. */
5479 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5481 new_flags.is_write = 1;
5482 rws_access_regno (REG_AR_CFM, new_flags, pred);
5483 return 1;
5485 case UNSPECV_SET_BSP:
5486 need_barrier = 1;
5487 break;
5489 case UNSPECV_BLOCKAGE:
5490 case UNSPECV_INSN_GROUP_BARRIER:
5491 case UNSPECV_BREAK:
5492 case UNSPECV_PSAC_ALL:
5493 case UNSPECV_PSAC_NORMAL:
5494 return 0;
5496 default:
5497 abort ();
5499 break;
5501 case RETURN:
5502 new_flags.is_write = 0;
5503 need_barrier = rws_access_regno (REG_RP, flags, pred);
5504 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5506 new_flags.is_write = 1;
5507 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5508 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5509 break;
5511 default:
5512 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5513 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5514 switch (format_ptr[i])
5516 case '0': /* unused field */
5517 case 'i': /* integer */
5518 case 'n': /* note */
5519 case 'w': /* wide integer */
5520 case 's': /* pointer to string */
5521 case 'S': /* optional pointer to string */
5522 break;
5524 case 'e':
5525 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5526 need_barrier = 1;
5527 break;
5529 case 'E':
5530 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5531 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5532 need_barrier = 1;
5533 break;
5535 default:
5536 abort ();
5538 break;
5540 return need_barrier;
5543 /* Clear out the state for group_barrier_needed_p at the start of a
5544 sequence of insns. */
5546 static void
5547 init_insn_group_barriers (void)
5549 memset (rws_sum, 0, sizeof (rws_sum));
5550 first_instruction = 1;
5553 /* Given the current state, recorded by previous calls to this function,
5554 determine whether a group barrier (a stop bit) is necessary before INSN.
5555 Return nonzero if so. */
5557 static int
5558 group_barrier_needed_p (rtx insn)
5560 rtx pat;
5561 int need_barrier = 0;
5562 struct reg_flags flags;
5564 memset (&flags, 0, sizeof (flags));
5565 switch (GET_CODE (insn))
5567 case NOTE:
5568 break;
5570 case BARRIER:
5571 /* A barrier doesn't imply an instruction group boundary. */
5572 break;
5574 case CODE_LABEL:
5575 memset (rws_insn, 0, sizeof (rws_insn));
5576 return 1;
5578 case CALL_INSN:
5579 flags.is_branch = 1;
5580 flags.is_sibcall = SIBLING_CALL_P (insn);
5581 memset (rws_insn, 0, sizeof (rws_insn));
5583 /* Don't bundle a call following another call. */
5584 if ((pat = prev_active_insn (insn))
5585 && GET_CODE (pat) == CALL_INSN)
5587 need_barrier = 1;
5588 break;
5591 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5592 break;
5594 case JUMP_INSN:
5595 flags.is_branch = 1;
5597 /* Don't bundle a jump following a call. */
5598 if ((pat = prev_active_insn (insn))
5599 && GET_CODE (pat) == CALL_INSN)
5601 need_barrier = 1;
5602 break;
5604 /* FALLTHRU */
5606 case INSN:
5607 if (GET_CODE (PATTERN (insn)) == USE
5608 || GET_CODE (PATTERN (insn)) == CLOBBER)
5609 /* Don't care about USE and CLOBBER "insns"---those are used to
5610 indicate to the optimizer that it shouldn't get rid of
5611 certain operations. */
5612 break;
5614 pat = PATTERN (insn);
5616 /* Ug. Hack hacks hacked elsewhere. */
5617 switch (recog_memoized (insn))
5619 /* We play dependency tricks with the epilogue in order
5620 to get proper schedules. Undo this for dv analysis. */
5621 case CODE_FOR_epilogue_deallocate_stack:
5622 case CODE_FOR_prologue_allocate_stack:
5623 pat = XVECEXP (pat, 0, 0);
5624 break;
5626 /* The pattern we use for br.cloop confuses the code above.
5627 The second element of the vector is representative. */
5628 case CODE_FOR_doloop_end_internal:
5629 pat = XVECEXP (pat, 0, 1);
5630 break;
5632 /* Doesn't generate code. */
5633 case CODE_FOR_pred_rel_mutex:
5634 case CODE_FOR_prologue_use:
5635 return 0;
5637 default:
5638 break;
5641 memset (rws_insn, 0, sizeof (rws_insn));
5642 need_barrier = rtx_needs_barrier (pat, flags, 0);
5644 /* Check to see if the previous instruction was a volatile
5645 asm. */
5646 if (! need_barrier)
5647 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5648 break;
5650 default:
5651 abort ();
5654 if (first_instruction && INSN_P (insn)
5655 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5656 && GET_CODE (PATTERN (insn)) != USE
5657 && GET_CODE (PATTERN (insn)) != CLOBBER)
5659 need_barrier = 0;
5660 first_instruction = 0;
5663 return need_barrier;
5666 /* Like group_barrier_needed_p, but do not clobber the current state. */
5668 static int
5669 safe_group_barrier_needed_p (rtx insn)
5671 struct reg_write_state rws_saved[NUM_REGS];
5672 int saved_first_instruction;
5673 int t;
5675 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5676 saved_first_instruction = first_instruction;
5678 t = group_barrier_needed_p (insn);
5680 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5681 first_instruction = saved_first_instruction;
5683 return t;
5686 /* Scan the current function and insert stop bits as necessary to
5687 eliminate dependencies. This function assumes that a final
5688 instruction scheduling pass has been run which has already
5689 inserted most of the necessary stop bits. This function only
5690 inserts new ones at basic block boundaries, since these are
5691 invisible to the scheduler. */
5693 static void
5694 emit_insn_group_barriers (FILE *dump)
5696 rtx insn;
5697 rtx last_label = 0;
5698 int insns_since_last_label = 0;
5700 init_insn_group_barriers ();
5702 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5704 if (GET_CODE (insn) == CODE_LABEL)
5706 if (insns_since_last_label)
5707 last_label = insn;
5708 insns_since_last_label = 0;
5710 else if (GET_CODE (insn) == NOTE
5711 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5713 if (insns_since_last_label)
5714 last_label = insn;
5715 insns_since_last_label = 0;
5717 else if (GET_CODE (insn) == INSN
5718 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5719 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5721 init_insn_group_barriers ();
5722 last_label = 0;
5724 else if (INSN_P (insn))
5726 insns_since_last_label = 1;
5728 if (group_barrier_needed_p (insn))
5730 if (last_label)
5732 if (dump)
5733 fprintf (dump, "Emitting stop before label %d\n",
5734 INSN_UID (last_label));
5735 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5736 insn = last_label;
5738 init_insn_group_barriers ();
5739 last_label = 0;
5746 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5747 This function has to emit all necessary group barriers. */
5749 static void
5750 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5752 rtx insn;
5754 init_insn_group_barriers ();
5756 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5758 if (GET_CODE (insn) == BARRIER)
5760 rtx last = prev_active_insn (insn);
5762 if (! last)
5763 continue;
5764 if (GET_CODE (last) == JUMP_INSN
5765 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5766 last = prev_active_insn (last);
5767 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5768 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5770 init_insn_group_barriers ();
5772 else if (INSN_P (insn))
5774 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5775 init_insn_group_barriers ();
5776 else if (group_barrier_needed_p (insn))
5778 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5779 init_insn_group_barriers ();
5780 group_barrier_needed_p (insn);
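/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   a toy version of the scan in emit_all_insn_group_barriers above.
   "Insns" are just (dest, src) register pairs; a stop is emitted before an
   insn that reads or writes a register already written in the current
   group, and the tracker is then reset.  All names are invented for
   illustration; compile separately to run.  */
#if 0
#include <stdio.h>
#include <string.h>

#define N_REGS 16

struct toy_insn { int dest, src; };

static unsigned char group_written[N_REGS];

static int
toy_barrier_needed (struct toy_insn insn)
{
  return group_written[insn.src] || group_written[insn.dest];
}

int
main (void)
{
  static const struct toy_insn stream[] =
    { {1, 2}, {3, 4}, {5, 1}, {6, 7}, {1, 6} };
  size_t i;

  memset (group_written, 0, sizeof group_written);
  for (i = 0; i < sizeof stream / sizeof stream[0]; i++)
    {
      if (toy_barrier_needed (stream[i]))
	{
	  printf (";;   <- stop bit before insn %zu\n", i);
	  memset (group_written, 0, sizeof group_written);
	}
      printf ("r%d = r%d\n", stream[i].dest, stream[i].src);
      group_written[stream[i].dest] = 1;
    }
  return 0;
}
#endif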
5787 static int errata_find_address_regs (rtx *, void *);
5788 static void errata_emit_nops (rtx);
5789 static void fixup_errata (void);
5791 /* This structure is used to track some details about the previous insn
5792 groups so we can determine if it may be necessary to insert NOPs to
5793 work around hardware errata. */
5794 static struct group
5796 HARD_REG_SET p_reg_set;
5797 HARD_REG_SET gr_reg_conditionally_set;
5798 } last_group[2];
5800 /* Index into the last_group array. */
5801 static int group_idx;
5803 /* Called through for_each_rtx; determines if a hard register that was
5804 conditionally set in the previous group is used as an address register.
5805 It ensures that for_each_rtx returns 1 in that case. */
5806 static int
5807 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5809 rtx x = *xp;
5810 if (GET_CODE (x) != MEM)
5811 return 0;
5812 x = XEXP (x, 0);
5813 if (GET_CODE (x) == POST_MODIFY)
5814 x = XEXP (x, 0);
5815 if (GET_CODE (x) == REG)
5817 struct group *prev_group = last_group + (group_idx ^ 1);
5818 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5819 REGNO (x)))
5820 return 1;
5821 return -1;
5823 return 0;
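/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the return-value protocol of a for_each_rtx-style walker, shown on a toy
   binary expression tree instead of RTL.  The callback returns 0 to
   continue, -1 to skip the node's children, or a positive value to abort
   the whole walk (that value is returned to the caller).  All names are
   invented; compile separately to run.  */
#if 0
#include <stdio.h>

struct node { int value; struct node *left, *right; };

typedef int (*walk_fn) (struct node *, void *);

static int
for_each_node (struct node *n, walk_fn fn, void *data)
{
  int r;

  if (n == NULL)
    return 0;
  r = fn (n, data);
  if (r > 0)
    return r;		/* abort the whole walk */
  if (r < 0)
    return 0;		/* skip this node's children only */
  r = for_each_node (n->left, fn, data);
  if (r != 0)
    return r;
  return for_each_node (n->right, fn, data);
}

/* Stop as soon as we see the value stored in *DATA.  */
static int
find_value (struct node *n, void *data)
{
  return n->value == *(int *) data ? 1 : 0;
}

int
main (void)
{
  struct node d = { 4, 0, 0 }, c = { 3, 0, 0 };
  struct node b = { 2, &c, &d }, a = { 1, &b, 0 };
  int wanted = 3;

  printf ("found 3: %d\n", for_each_node (&a, find_value, &wanted));
  wanted = 9;
  printf ("found 9: %d\n", for_each_node (&a, find_value, &wanted));
  return 0;
}
#endif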
5826 /* Called for each insn; this function keeps track of the state in
5827 last_group and emits additional NOPs if necessary to work around
5828 an Itanium A/B step erratum. */
5829 static void
5830 errata_emit_nops (rtx insn)
5832 struct group *this_group = last_group + group_idx;
5833 struct group *prev_group = last_group + (group_idx ^ 1);
5834 rtx pat = PATTERN (insn);
5835 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5836 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5837 enum attr_type type;
5838 rtx set = real_pat;
5840 if (GET_CODE (real_pat) == USE
5841 || GET_CODE (real_pat) == CLOBBER
5842 || GET_CODE (real_pat) == ASM_INPUT
5843 || GET_CODE (real_pat) == ADDR_VEC
5844 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5845 || asm_noperands (PATTERN (insn)) >= 0)
5846 return;
5848 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5849 parts of it. */
5851 if (GET_CODE (set) == PARALLEL)
5853 int i;
5854 set = XVECEXP (real_pat, 0, 0);
5855 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5856 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5857 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5859 set = 0;
5860 break;
5864 if (set && GET_CODE (set) != SET)
5865 set = 0;
5867 type = get_attr_type (insn);
5869 if (type == TYPE_F
5870 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5871 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5873 if ((type == TYPE_M || type == TYPE_A) && cond && set
5874 && REG_P (SET_DEST (set))
5875 && GET_CODE (SET_SRC (set)) != PLUS
5876 && GET_CODE (SET_SRC (set)) != MINUS
5877 && (GET_CODE (SET_SRC (set)) != ASHIFT
5878 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5879 && (GET_CODE (SET_SRC (set)) != MEM
5880 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5881 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5883 if (!COMPARISON_P (cond)
5884 || !REG_P (XEXP (cond, 0)))
5885 abort ();
5887 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5888 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5890 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5892 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5893 emit_insn_before (gen_nop (), insn);
5894 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5895 group_idx = 0;
5896 memset (last_group, 0, sizeof last_group);
5900 /* Emit extra nops if they are required to work around hardware errata. */
5902 static void
5903 fixup_errata (void)
5905 rtx insn;
5907 if (! TARGET_B_STEP)
5908 return;
5910 group_idx = 0;
5911 memset (last_group, 0, sizeof last_group);
5913 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5915 if (!INSN_P (insn))
5916 continue;
5918 if (ia64_safe_type (insn) == TYPE_S)
5920 group_idx ^= 1;
5921 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5923 else
5924 errata_emit_nops (insn);
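/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the two-entry "current/previous group" ring used by fixup_errata above.
   group_idx ^ 1 always names the previous group, and starting a new group
   clears only the slot being reused.  Standalone toy with invented fields;
   compile separately to run.  */
#if 0
#include <stdio.h>
#include <string.h>

struct toy_group { int writes; };

static struct toy_group groups[2];
static int idx;

static void
start_new_group (void)
{
  idx ^= 1;
  memset (&groups[idx], 0, sizeof groups[idx]);
}

int
main (void)
{
  memset (groups, 0, sizeof groups);
  groups[idx].writes = 3;	/* record something in the current group */
  start_new_group ();
  printf ("previous group writes: %d\n", groups[idx ^ 1].writes);  /* 3 */
  printf ("current group writes:  %d\n", groups[idx].writes);	   /* 0 */
  return 0;
}
#endif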
5929 /* Instruction scheduling support. */
5931 #define NR_BUNDLES 10
5933 /* A list of names of all available bundles. */
5935 static const char *bundle_name [NR_BUNDLES] =
5937 ".mii",
5938 ".mmi",
5939 ".mfi",
5940 ".mmf",
5941 #if NR_BUNDLES == 10
5942 ".bbb",
5943 ".mbb",
5944 #endif
5945 ".mib",
5946 ".mmb",
5947 ".mfb",
5948 ".mlx"
5951 /* Nonzero if we should insert stop bits into the schedule. */
5953 int ia64_final_schedule = 0;
5955 /* Codes of the corresponding queried units: */
5957 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5958 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5960 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5961 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5963 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5965 /* The following variable value is an insn group barrier. */
5967 static rtx dfa_stop_insn;
5969 /* The following variable value is the last issued insn. */
5971 static rtx last_scheduled_insn;
5973 /* The following variable value is the size of the DFA state. */
5975 static size_t dfa_state_size;
5977 /* The following variable value is a pointer to a DFA state used as
5978 a temporary variable. */
5980 static state_t temp_dfa_state = NULL;
5982 /* The following variable value is the DFA state after issuing the last
5983 insn. */
5985 static state_t prev_cycle_state = NULL;
5987 /* The following array element values are TRUE if the corresponding
5988 insn requires stop bits to be added before it. */
5990 static char *stops_p;
5992 /* The following variable is used to set up the array mentioned above. */
5994 static int stop_before_p = 0;
5996 /* The following variable value is the length of the arrays `clocks' and
5997 `add_cycles'. */
5999 static int clocks_length;
6001 /* The following array element values are cycles on which the
6002 corresponding insn will be issued. The array is used only for
6003 Itanium1. */
6005 static int *clocks;
6007 /* The following array element values are numbers of cycles that should be
6008 added to improve insn scheduling of MM-insns for Itanium1. */
6010 static int *add_cycles;
6012 static rtx ia64_single_set (rtx);
6013 static void ia64_emit_insn_before (rtx, rtx);
6015 /* Map a bundle number to its pseudo-op. */
6017 const char *
6018 get_bundle_name (int b)
6020 return bundle_name[b];
6024 /* Return the maximum number of instructions a cpu can issue. */
6026 static int
6027 ia64_issue_rate (void)
6029 return 6;
6032 /* Helper function - like single_set, but look inside COND_EXEC. */
6034 static rtx
6035 ia64_single_set (rtx insn)
6037 rtx x = PATTERN (insn), ret;
6038 if (GET_CODE (x) == COND_EXEC)
6039 x = COND_EXEC_CODE (x);
6040 if (GET_CODE (x) == SET)
6041 return x;
6043 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6044 Although they are not a classical single set, the second set is there just
6045 to protect the insn from moving past FP-relative stack accesses. */
6046 switch (recog_memoized (insn))
6048 case CODE_FOR_prologue_allocate_stack:
6049 case CODE_FOR_epilogue_deallocate_stack:
6050 ret = XVECEXP (x, 0, 0);
6051 break;
6053 default:
6054 ret = single_set_2 (insn, x);
6055 break;
6058 return ret;
6061 /* Adjust the cost of a scheduling dependency. Return the new cost of
6062 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6064 static int
6065 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6067 enum attr_itanium_class dep_class;
6068 enum attr_itanium_class insn_class;
6070 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6071 return cost;
6073 insn_class = ia64_safe_itanium_class (insn);
6074 dep_class = ia64_safe_itanium_class (dep_insn);
6075 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6076 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6077 return 0;
6079 return cost;
6082 /* Like emit_insn_before, but skip cycle_display notes.
6083 ??? When cycle display notes are implemented, update this. */
6085 static void
6086 ia64_emit_insn_before (rtx insn, rtx before)
6088 emit_insn_before (insn, before);
6091 /* The following function marks insns who produce addresses for load
6092 and store insns. Such insns will be placed into M slots because it
6093 decrease latency time for Itanium1 (see function
6094 `ia64_produce_address_p' and the DFA descriptions). */
6096 static void
6097 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6099 rtx insn, link, next, next_tail;
6101 next_tail = NEXT_INSN (tail);
6102 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6103 if (INSN_P (insn))
6104 insn->call = 0;
6105 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6106 if (INSN_P (insn)
6107 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6109 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
6111 next = XEXP (link, 0);
6112 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
6113 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
6114 && ia64_st_address_bypass_p (insn, next))
6115 break;
6116 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
6117 || ia64_safe_itanium_class (next)
6118 == ITANIUM_CLASS_FLD)
6119 && ia64_ld_address_bypass_p (insn, next))
6120 break;
6122 insn->call = link != 0;
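/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the forward-dependence scan done by ia64_dependencies_evaluation_hook
   above, on toy data.  Each ALU "insn" carries a list of consumer indices;
   the producer gets flagged if any consumer is a load or store.  The
   address-bypass checks of the real code are omitted and all names are
   invented; compile separately to run.  */
#if 0
#include <stdio.h>

enum toy_class { TOY_IALU, TOY_LD, TOY_ST };

struct toy_insn
{
  enum toy_class klass;
  int consumers[4];		/* indices of dependent insns, -1 ends the list */
  int produces_address;		/* stands in for insn->call above */
};

int
main (void)
{
  struct toy_insn insns[] =
    {
      { TOY_IALU, { 2, -1 }, 0 },	/* feeds the load at index 2 */
      { TOY_IALU, { 3, -1 }, 0 },	/* feeds another ALU insn only */
      { TOY_LD,   { -1 },    0 },
      { TOY_IALU, { -1 },    0 },
    };
  int i, j;

  for (i = 0; i < 4; i++)
    if (insns[i].klass == TOY_IALU)
      for (j = 0; insns[i].consumers[j] >= 0; j++)
	if (insns[insns[i].consumers[j]].klass == TOY_LD
	    || insns[insns[i].consumers[j]].klass == TOY_ST)
	  {
	    insns[i].produces_address = 1;
	    break;
	  }

  for (i = 0; i < 4; i++)
    printf ("insn %d produces_address = %d\n", i, insns[i].produces_address);
  return 0;
}
#endif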
6126 /* We're beginning a new block. Initialize data structures as necessary. */
6128 static void
6129 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6130 int sched_verbose ATTRIBUTE_UNUSED,
6131 int max_ready ATTRIBUTE_UNUSED)
6133 #ifdef ENABLE_CHECKING
6134 rtx insn;
6136 if (reload_completed)
6137 for (insn = NEXT_INSN (current_sched_info->prev_head);
6138 insn != current_sched_info->next_tail;
6139 insn = NEXT_INSN (insn))
6140 if (SCHED_GROUP_P (insn))
6141 abort ();
6142 #endif
6143 last_scheduled_insn = NULL_RTX;
6144 init_insn_group_barriers ();
6147 /* We are about to begin issuing insns for this clock cycle.
6148 Override the default sort algorithm to better slot instructions. */
6150 static int
6151 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6152 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6153 int reorder_type)
6155 int n_asms;
6156 int n_ready = *pn_ready;
6157 rtx *e_ready = ready + n_ready;
6158 rtx *insnp;
6160 if (sched_verbose)
6161 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6163 if (reorder_type == 0)
6165 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6166 n_asms = 0;
6167 for (insnp = ready; insnp < e_ready; insnp++)
6168 if (insnp < e_ready)
6170 rtx insn = *insnp;
6171 enum attr_type t = ia64_safe_type (insn);
6172 if (t == TYPE_UNKNOWN)
6174 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6175 || asm_noperands (PATTERN (insn)) >= 0)
6177 rtx lowest = ready[n_asms];
6178 ready[n_asms] = insn;
6179 *insnp = lowest;
6180 n_asms++;
6182 else
6184 rtx highest = ready[n_ready - 1];
6185 ready[n_ready - 1] = insn;
6186 *insnp = highest;
6187 return 1;
6192 if (n_asms < n_ready)
6194 /* Some normal insns to process. Skip the asms. */
6195 ready += n_asms;
6196 n_ready -= n_asms;
6198 else if (n_ready > 0)
6199 return 1;
6202 if (ia64_final_schedule)
6204 int deleted = 0;
6205 int nr_need_stop = 0;
6207 for (insnp = ready; insnp < e_ready; insnp++)
6208 if (safe_group_barrier_needed_p (*insnp))
6209 nr_need_stop++;
6211 if (reorder_type == 1 && n_ready == nr_need_stop)
6212 return 0;
6213 if (reorder_type == 0)
6214 return 1;
6215 insnp = e_ready;
6216 /* Move down everything that needs a stop bit, preserving
6217 relative order. */
6218 while (insnp-- > ready + deleted)
6219 while (insnp >= ready + deleted)
6221 rtx insn = *insnp;
6222 if (! safe_group_barrier_needed_p (insn))
6223 break;
6224 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6225 *ready = insn;
6226 deleted++;
6228 n_ready -= deleted;
6229 ready += deleted;
6232 return 1;
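/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the memmove-based "move down everything that needs a stop bit" loop from
   ia64_dfa_sched_reorder above, transplanted onto an int array.  Odd
   numbers stand in for insns that need a stop bit; they end up at the low
   end of the array with relative order preserved on both sides.  Compile
   separately to run.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
needs_stop (int x)
{
  return x % 2 != 0;	/* stand-in for safe_group_barrier_needed_p */
}

int
main (void)
{
  int ready[] = { 1, 2, 3, 4, 5, 6 };
  int n_ready = 6, deleted = 0, i;
  int *e_ready = ready + n_ready, *insnp = e_ready;

  while (insnp-- > ready + deleted)
    while (insnp >= ready + deleted)
      {
	int insn = *insnp;
	if (!needs_stop (insn))
	  break;
	memmove (ready + 1, ready, (insnp - ready) * sizeof (int));
	*ready = insn;
	deleted++;
      }

  for (i = 0; i < n_ready; i++)
    printf ("%d ", ready[i]);	/* prints: 1 3 5 2 4 6 */
  printf ("(%d moved down)\n", deleted);
  return 0;
}
#endif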
6235 /* We are about to begin issuing insns for this clock cycle. Override
6236 the default sort algorithm to better slot instructions. */
6238 static int
6239 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6240 int clock_var)
6242 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6243 pn_ready, clock_var, 0);
6246 /* Like ia64_sched_reorder, but called after issuing each insn.
6247 Override the default sort algorithm to better slot instructions. */
6249 static int
6250 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6251 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6252 int *pn_ready, int clock_var)
6254 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6255 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6256 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6257 clock_var, 1);
6260 /* We are about to issue INSN. Return the number of insns left on the
6261 ready queue that can be issued this cycle. */
6263 static int
6264 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6265 int sched_verbose ATTRIBUTE_UNUSED,
6266 rtx insn ATTRIBUTE_UNUSED,
6267 int can_issue_more ATTRIBUTE_UNUSED)
6269 last_scheduled_insn = insn;
6270 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6271 if (reload_completed)
6273 if (group_barrier_needed_p (insn))
6274 abort ();
6275 if (GET_CODE (insn) == CALL_INSN)
6276 init_insn_group_barriers ();
6277 stops_p [INSN_UID (insn)] = stop_before_p;
6278 stop_before_p = 0;
6280 return 1;
6283 /* We are choosing insn from the ready queue. Return nonzero if INSN
6284 can be chosen. */
6286 static int
6287 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6289 if (insn == NULL_RTX || !INSN_P (insn))
6290 abort ();
6291 return (!reload_completed
6292 || !safe_group_barrier_needed_p (insn));
6295 /* The following variable value is a pseudo-insn used by the DFA insn
6296 scheduler to change the DFA state when the simulated clock is
6297 increased. */
6299 static rtx dfa_pre_cycle_insn;
6301 /* We are about to begin issuing INSN. Return nonzero if we cannot
6302 issue it on the given cycle CLOCK, and return zero if we should not sort
6303 the ready queue on the next clock start. */
6305 static int
6306 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6307 int clock, int *sort_p)
6309 int setup_clocks_p = FALSE;
6311 if (insn == NULL_RTX || !INSN_P (insn))
6312 abort ();
6313 if ((reload_completed && safe_group_barrier_needed_p (insn))
6314 || (last_scheduled_insn
6315 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6316 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6317 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6319 init_insn_group_barriers ();
6320 if (verbose && dump)
6321 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6322 last_clock == clock ? " + cycle advance" : "");
6323 stop_before_p = 1;
6324 if (last_clock == clock)
6326 state_transition (curr_state, dfa_stop_insn);
6327 if (TARGET_EARLY_STOP_BITS)
6328 *sort_p = (last_scheduled_insn == NULL_RTX
6329 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6330 else
6331 *sort_p = 0;
6332 return 1;
6334 else if (reload_completed)
6335 setup_clocks_p = TRUE;
6336 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6337 state_transition (curr_state, dfa_stop_insn);
6338 state_transition (curr_state, dfa_pre_cycle_insn);
6339 state_transition (curr_state, NULL);
6341 else if (reload_completed)
6342 setup_clocks_p = TRUE;
6343 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM)
6345 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6347 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6349 rtx link;
6350 int d = -1;
6352 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6353 if (REG_NOTE_KIND (link) == 0)
6355 enum attr_itanium_class dep_class;
6356 rtx dep_insn = XEXP (link, 0);
6358 dep_class = ia64_safe_itanium_class (dep_insn);
6359 if ((dep_class == ITANIUM_CLASS_MMMUL
6360 || dep_class == ITANIUM_CLASS_MMSHF)
6361 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6362 && (d < 0
6363 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6364 d = last_clock - clocks [INSN_UID (dep_insn)];
6366 if (d >= 0)
6367 add_cycles [INSN_UID (insn)] = 3 - d;
6370 return 0;
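/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the small computation at the end of ia64_dfa_new_cycle above -- find the
   closest MMMUL/MMSHF producer that issued fewer than 4 cycles ago and
   record 3 - d extra cycles for the consumer.  The clock values are made
   up; compile separately to run.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int producer_clock[] = { 10, 12, 13 };  /* cycles the MM producers issued on */
  int last_clock = 14;			  /* cycle of the dependent insn */
  int d = -1, i;

  for (i = 0; i < 3; i++)
    {
      int dist = last_clock - producer_clock[i];
      if (dist < 4 && (d < 0 || dist < d))
	d = dist;
    }
  if (d >= 0)
    printf ("add_cycles = %d\n", 3 - d);  /* closest producer is 1 cycle away -> 2 */
  return 0;
}
#endif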
6375 /* The following page contains abstract data `bundle states' which are
6376 used for bundling insns (inserting nops and template generation). */
6378 /* The following describes state of insn bundling. */
6380 struct bundle_state
6382 /* Unique bundle state number to identify them in the debugging
6383 output */
6384 int unique_num;
6385 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6386 /* number of nops before and after the insn */
6387 short before_nops_num, after_nops_num;
6388 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
6389 insn) */
6390 int cost; /* cost of the state in cycles */
6391 int accumulated_insns_num; /* number of all previous insns including
6392 nops. L is considered as 2 insns */
6393 int branch_deviation; /* deviation of previous branches from 3rd slots */
6394 struct bundle_state *next; /* next state with the same insn_num */
6395 struct bundle_state *originator; /* originator (previous insn state) */
6396 /* All bundle states are in the following chain. */
6397 struct bundle_state *allocated_states_chain;
6398 /* The DFA State after issuing the insn and the nops. */
6399 state_t dfa_state;
6402 /* The following maps an insn number to the corresponding bundle state. */
6404 static struct bundle_state **index_to_bundle_states;
6406 /* The unique number of next bundle state. */
6408 static int bundle_states_num;
6410 /* All allocated bundle states are in the following chain. */
6412 static struct bundle_state *allocated_bundle_states_chain;
6414 /* All allocated but not used bundle states are in the following
6415 chain. */
6417 static struct bundle_state *free_bundle_state_chain;
6420 /* The following function returns a free bundle state. */
6422 static struct bundle_state *
6423 get_free_bundle_state (void)
6425 struct bundle_state *result;
6427 if (free_bundle_state_chain != NULL)
6429 result = free_bundle_state_chain;
6430 free_bundle_state_chain = result->next;
6432 else
6434 result = xmalloc (sizeof (struct bundle_state));
6435 result->dfa_state = xmalloc (dfa_state_size);
6436 result->allocated_states_chain = allocated_bundle_states_chain;
6437 allocated_bundle_states_chain = result;
6439 result->unique_num = bundle_states_num++;
6440 return result;
6444 /* The following function frees given bundle state. */
6446 static void
6447 free_bundle_state (struct bundle_state *state)
6449 state->next = free_bundle_state_chain;
6450 free_bundle_state_chain = state;
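/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the free-list recycling scheme used by get_free_bundle_state and
   free_bundle_state above, reduced to a standalone toy allocator.  Every
   object also stays on an "allocated_chain" so that everything can be
   released at the end, as finish_bundle_states does.  Names are invented;
   compile separately to run.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_state
{
  int unique_num;
  struct toy_state *next;		/* free-list link */
  struct toy_state *allocated_chain;	/* chain of every allocation */
};

static struct toy_state *free_chain;
static struct toy_state *allocated_chain;
static int states_num;

static struct toy_state *
get_free_toy_state (void)
{
  struct toy_state *result;

  if (free_chain != NULL)
    {
      result = free_chain;
      free_chain = result->next;
    }
  else
    {
      result = malloc (sizeof (struct toy_state));
      result->allocated_chain = allocated_chain;
      allocated_chain = result;
    }
  result->unique_num = states_num++;
  return result;
}

static void
free_toy_state (struct toy_state *state)
{
  state->next = free_chain;
  free_chain = state;
}

int
main (void)
{
  struct toy_state *a = get_free_toy_state ();
  struct toy_state *b;

  free_toy_state (a);
  b = get_free_toy_state ();	/* recycles the same memory as A */
  printf ("recycled: %s\n", a == b ? "yes" : "no");

  /* Final cleanup, like finish_bundle_states.  */
  while (allocated_chain != NULL)
    {
      struct toy_state *next = allocated_chain->allocated_chain;
      free (allocated_chain);
      allocated_chain = next;
    }
  return 0;
}
#endif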
6453 /* Start work with abstract data `bundle states'. */
6455 static void
6456 initiate_bundle_states (void)
6458 bundle_states_num = 0;
6459 free_bundle_state_chain = NULL;
6460 allocated_bundle_states_chain = NULL;
6463 /* Finish work with abstract data `bundle states'. */
6465 static void
6466 finish_bundle_states (void)
6468 struct bundle_state *curr_state, *next_state;
6470 for (curr_state = allocated_bundle_states_chain;
6471 curr_state != NULL;
6472 curr_state = next_state)
6474 next_state = curr_state->allocated_states_chain;
6475 free (curr_state->dfa_state);
6476 free (curr_state);
6480 /* Hash table of the bundle states. The key is dfa_state and insn_num
6481 of the bundle states. */
6483 static htab_t bundle_state_table;
6485 /* The function returns hash of BUNDLE_STATE. */
6487 static unsigned
6488 bundle_state_hash (const void *bundle_state)
6490 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6491 unsigned result, i;
6493 for (result = i = 0; i < dfa_state_size; i++)
6494 result += (((unsigned char *) state->dfa_state) [i]
6495 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6496 return result + state->insn_num;
6499 /* The function returns nonzero if the bundle state keys are equal. */
6501 static int
6502 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6504 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6505 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6507 return (state1->insn_num == state2->insn_num
6508 && memcmp (state1->dfa_state, state2->dfa_state,
6509 dfa_state_size) == 0);
6512 /* The function inserts the BUNDLE_STATE into the hash table. The
6513 function returns nonzero if the bundle has been inserted into the
6514 table. The table contains the best bundle state with given key. */
6516 static int
6517 insert_bundle_state (struct bundle_state *bundle_state)
6519 void **entry_ptr;
6521 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6522 if (*entry_ptr == NULL)
6524 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6525 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6526 *entry_ptr = (void *) bundle_state;
6527 return TRUE;
6529 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6530 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6531 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6532 > bundle_state->accumulated_insns_num
6533 || (((struct bundle_state *)
6534 *entry_ptr)->accumulated_insns_num
6535 == bundle_state->accumulated_insns_num
6536 && ((struct bundle_state *)
6537 *entry_ptr)->branch_deviation
6538 > bundle_state->branch_deviation))))
6541 struct bundle_state temp;
6543 temp = *(struct bundle_state *) *entry_ptr;
6544 *(struct bundle_state *) *entry_ptr = *bundle_state;
6545 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6546 *bundle_state = temp;
6548 return FALSE;
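/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the three-level comparison that insert_bundle_state above applies when
   two states hash to the same key: prefer smaller cost, then fewer
   accumulated insns (i.e. fewer nops), then smaller branch deviation.
   Standalone toy; compile separately to run.  */
#if 0
#include <stdio.h>

struct toy_state
{
  int cost;
  int accumulated_insns_num;
  int branch_deviation;
};

/* Return nonzero if NEW_S should replace OLD_S for the same key.  */
static int
better_state_p (const struct toy_state *new_s, const struct toy_state *old_s)
{
  if (new_s->cost != old_s->cost)
    return new_s->cost < old_s->cost;
  if (new_s->accumulated_insns_num != old_s->accumulated_insns_num)
    return new_s->accumulated_insns_num < old_s->accumulated_insns_num;
  return new_s->branch_deviation < old_s->branch_deviation;
}

int
main (void)
{
  struct toy_state old_s = { 2, 9, 1 };
  struct toy_state cheaper = { 1, 12, 3 };
  struct toy_state same_cost_fewer_insns = { 2, 8, 3 };

  printf ("%d\n", better_state_p (&cheaper, &old_s));		      /* 1 */
  printf ("%d\n", better_state_p (&same_cost_fewer_insns, &old_s));  /* 1 */
  printf ("%d\n", better_state_p (&old_s, &cheaper));		      /* 0 */
  return 0;
}
#endif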
6551 /* Start work with the hash table. */
6553 static void
6554 initiate_bundle_state_table (void)
6556 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6557 (htab_del) 0);
6560 /* Finish work with the hash table. */
6562 static void
6563 finish_bundle_state_table (void)
6565 htab_delete (bundle_state_table);
6570 /* The following variable is an insn `nop' used to check bundle states
6571 with different numbers of inserted nops. */
6573 static rtx ia64_nop;
6575 /* The following function tries to issue NOPS_NUM nops for the current
6576 state without advancing processor cycle. If it failed, the
6577 function returns FALSE and frees the current state. */
6579 static int
6580 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6582 int i;
6584 for (i = 0; i < nops_num; i++)
6585 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6587 free_bundle_state (curr_state);
6588 return FALSE;
6590 return TRUE;
6593 /* The following function tries to issue INSN for the current
6594 state without advancing processor cycle. If it failed, the
6595 function returns FALSE and frees the current state. */
6597 static int
6598 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6600 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6602 free_bundle_state (curr_state);
6603 return FALSE;
6605 return TRUE;
6608 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6609 starting with ORIGINATOR without advancing processor cycle. If
6610 TRY_BUNDLE_END_P is TRUE, the function also/only (if
6611 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6612 If successful, the function creates a new bundle state and
6613 inserts it into the hash table and into `index_to_bundle_states'. */
6615 static void
6616 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6617 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6619 struct bundle_state *curr_state;
6621 curr_state = get_free_bundle_state ();
6622 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6623 curr_state->insn = insn;
6624 curr_state->insn_num = originator->insn_num + 1;
6625 curr_state->cost = originator->cost;
6626 curr_state->originator = originator;
6627 curr_state->before_nops_num = before_nops_num;
6628 curr_state->after_nops_num = 0;
6629 curr_state->accumulated_insns_num
6630 = originator->accumulated_insns_num + before_nops_num;
6631 curr_state->branch_deviation = originator->branch_deviation;
6632 if (insn == NULL_RTX)
6633 abort ();
6634 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6636 if (GET_MODE (insn) == TImode)
6637 abort ();
6638 if (!try_issue_nops (curr_state, before_nops_num))
6639 return;
6640 if (!try_issue_insn (curr_state, insn))
6641 return;
6642 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6643 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6644 && curr_state->accumulated_insns_num % 3 != 0)
6646 free_bundle_state (curr_state);
6647 return;
6650 else if (GET_MODE (insn) != TImode)
6652 if (!try_issue_nops (curr_state, before_nops_num))
6653 return;
6654 if (!try_issue_insn (curr_state, insn))
6655 return;
6656 curr_state->accumulated_insns_num++;
6657 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6658 || asm_noperands (PATTERN (insn)) >= 0)
6659 abort ();
6660 if (ia64_safe_type (insn) == TYPE_L)
6661 curr_state->accumulated_insns_num++;
6663 else
6665 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6666 state_transition (curr_state->dfa_state, NULL);
6667 curr_state->cost++;
6668 if (!try_issue_nops (curr_state, before_nops_num))
6669 return;
6670 if (!try_issue_insn (curr_state, insn))
6671 return;
6672 curr_state->accumulated_insns_num++;
6673 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6674 || asm_noperands (PATTERN (insn)) >= 0)
6676 /* Finish bundle containing asm insn. */
6677 curr_state->after_nops_num
6678 = 3 - curr_state->accumulated_insns_num % 3;
6679 curr_state->accumulated_insns_num
6680 += 3 - curr_state->accumulated_insns_num % 3;
6682 else if (ia64_safe_type (insn) == TYPE_L)
6683 curr_state->accumulated_insns_num++;
6685 if (ia64_safe_type (insn) == TYPE_B)
6686 curr_state->branch_deviation
6687 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6688 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6690 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6692 state_t dfa_state;
6693 struct bundle_state *curr_state1;
6694 struct bundle_state *allocated_states_chain;
6696 curr_state1 = get_free_bundle_state ();
6697 dfa_state = curr_state1->dfa_state;
6698 allocated_states_chain = curr_state1->allocated_states_chain;
6699 *curr_state1 = *curr_state;
6700 curr_state1->dfa_state = dfa_state;
6701 curr_state1->allocated_states_chain = allocated_states_chain;
6702 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6703 dfa_state_size);
6704 curr_state = curr_state1;
6706 if (!try_issue_nops (curr_state,
6707 3 - curr_state->accumulated_insns_num % 3))
6708 return;
6709 curr_state->after_nops_num
6710 = 3 - curr_state->accumulated_insns_num % 3;
6711 curr_state->accumulated_insns_num
6712 += 3 - curr_state->accumulated_insns_num % 3;
6714 if (!insert_bundle_state (curr_state))
6715 free_bundle_state (curr_state);
6716 return;
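/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the slot arithmetic used in issue_nops_and_insn above.  With ACC insns
   (counting nops, and counting an L-type insn as 2) already placed,
   3 - ACC % 3 nops finish the current bundle, and a branch that became the
   ACC-th insn sits in slot (ACC - 1) % 3, deviating from the 3rd slot by
   2 - (ACC - 1) % 3.  Compile separately to run.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int acc;

  for (acc = 1; acc <= 6; acc++)
    {
      int pad = acc % 3 != 0 ? 3 - acc % 3 : 0;
      int branch_dev = 2 - (acc - 1) % 3;
      printf ("acc=%d: nops to close bundle=%d, branch deviation=%d\n",
	      acc, pad, branch_dev);
    }
  return 0;
}
#endif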
6719 /* The following function returns the position in the two-bundle window
6720 for the given STATE. */
6722 static int
6723 get_max_pos (state_t state)
6725 if (cpu_unit_reservation_p (state, pos_6))
6726 return 6;
6727 else if (cpu_unit_reservation_p (state, pos_5))
6728 return 5;
6729 else if (cpu_unit_reservation_p (state, pos_4))
6730 return 4;
6731 else if (cpu_unit_reservation_p (state, pos_3))
6732 return 3;
6733 else if (cpu_unit_reservation_p (state, pos_2))
6734 return 2;
6735 else if (cpu_unit_reservation_p (state, pos_1))
6736 return 1;
6737 else
6738 return 0;
6741 /* The function returns the code of a possible template for the given
6742 position and state. The function should be called only with position
6743 equal to 3 or 6. */
6745 static int
6746 get_template (state_t state, int pos)
6748 switch (pos)
6750 case 3:
6751 if (cpu_unit_reservation_p (state, _0mii_))
6752 return 0;
6753 else if (cpu_unit_reservation_p (state, _0mmi_))
6754 return 1;
6755 else if (cpu_unit_reservation_p (state, _0mfi_))
6756 return 2;
6757 else if (cpu_unit_reservation_p (state, _0mmf_))
6758 return 3;
6759 else if (cpu_unit_reservation_p (state, _0bbb_))
6760 return 4;
6761 else if (cpu_unit_reservation_p (state, _0mbb_))
6762 return 5;
6763 else if (cpu_unit_reservation_p (state, _0mib_))
6764 return 6;
6765 else if (cpu_unit_reservation_p (state, _0mmb_))
6766 return 7;
6767 else if (cpu_unit_reservation_p (state, _0mfb_))
6768 return 8;
6769 else if (cpu_unit_reservation_p (state, _0mlx_))
6770 return 9;
6771 else
6772 abort ();
6773 case 6:
6774 if (cpu_unit_reservation_p (state, _1mii_))
6775 return 0;
6776 else if (cpu_unit_reservation_p (state, _1mmi_))
6777 return 1;
6778 else if (cpu_unit_reservation_p (state, _1mfi_))
6779 return 2;
6780 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6781 return 3;
6782 else if (cpu_unit_reservation_p (state, _1bbb_))
6783 return 4;
6784 else if (cpu_unit_reservation_p (state, _1mbb_))
6785 return 5;
6786 else if (cpu_unit_reservation_p (state, _1mib_))
6787 return 6;
6788 else if (cpu_unit_reservation_p (state, _1mmb_))
6789 return 7;
6790 else if (cpu_unit_reservation_p (state, _1mfb_))
6791 return 8;
6792 else if (cpu_unit_reservation_p (state, _1mlx_))
6793 return 9;
6794 else
6795 abort ();
6796 default:
6797 abort ();
6801 /* The following function returns the first insn important for insn
6802 bundling, starting at INSN and before TAIL. */
6804 static rtx
6805 get_next_important_insn (rtx insn, rtx tail)
6807 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6808 if (INSN_P (insn)
6809 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6810 && GET_CODE (PATTERN (insn)) != USE
6811 && GET_CODE (PATTERN (insn)) != CLOBBER)
6812 return insn;
6813 return NULL_RTX;
6816 /* The following function does insn bundling. Bundling means
6817 inserting templates and nop insns to fit insn groups into permitted
6818 templates. Instruction scheduling uses NDFA (non-deterministic
6819 finite automata) encoding information about the templates and the
6820 inserted nops. Nondeterminism of the automata permits following
6821 all possible insn sequences very quickly.
6823 Unfortunately it is not possible to get information about inserting
6824 nop insns and used templates from the automata states. The
6825 automata only says that we can issue an insn possibly inserting
6826 some nops before it and using some template. Therefore insn
6827 bundling in this function is implemented by using DFA
6828 (deterministic finite automata). We follow all possible insn
6829 sequences by inserting 0-2 nops (that is what the NDFA describes for
6830 insn scheduling) before/after each insn being bundled. We know the
6831 start of a simulated processor cycle from insn scheduling (an insn
6832 starting a new cycle has TImode).
6834 A simple implementation of insn bundling would create an enormous
6835 number of possible insn sequences satisfying information about new
6836 cycle ticks taken from the insn scheduling. To make the algorithm
6837 practical we use dynamic programming. Each decision (about
6838 inserting nops and implicitly about previous decisions) is described
6839 by the structure bundle_state (see above). If we generate the same
6840 bundle state (key is automaton state after issuing the insns and
6841 nops for it), we reuse the already generated one. As a consequence we
6842 reject some decisions which cannot improve the solution and
6843 reduce the memory used by the algorithm.
6845 When we reach the end of EBB (extended basic block), we choose the
6846 best sequence and then, moving back in EBB, insert templates for
6847 the best alternative. The templates are taken from querying
6848 automaton state for each insn in chosen bundle states.
6850 So the algorithm makes two (forward and backward) passes through
6851 the EBB. There is an additional forward pass through the EBB for the
6852 Itanium1 processor. This pass inserts more nops to make the dependency
6853 between a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
6855 static void
6856 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6858 struct bundle_state *curr_state, *next_state, *best_state;
6859 rtx insn, next_insn;
6860 int insn_num;
6861 int i, bundle_end_p, only_bundle_end_p, asm_p;
6862 int pos = 0, max_pos, template0, template1;
6863 rtx b;
6864 rtx nop;
6865 enum attr_type type;
6867 insn_num = 0;
6868 /* Count insns in the EBB. */
6869 for (insn = NEXT_INSN (prev_head_insn);
6870 insn && insn != tail;
6871 insn = NEXT_INSN (insn))
6872 if (INSN_P (insn))
6873 insn_num++;
6874 if (insn_num == 0)
6875 return;
6876 bundling_p = 1;
6877 dfa_clean_insn_cache ();
6878 initiate_bundle_state_table ();
6879 index_to_bundle_states = xmalloc ((insn_num + 2)
6880 * sizeof (struct bundle_state *));
6881 /* First (forward) pass -- generation of bundle states. */
6882 curr_state = get_free_bundle_state ();
6883 curr_state->insn = NULL;
6884 curr_state->before_nops_num = 0;
6885 curr_state->after_nops_num = 0;
6886 curr_state->insn_num = 0;
6887 curr_state->cost = 0;
6888 curr_state->accumulated_insns_num = 0;
6889 curr_state->branch_deviation = 0;
6890 curr_state->next = NULL;
6891 curr_state->originator = NULL;
6892 state_reset (curr_state->dfa_state);
6893 index_to_bundle_states [0] = curr_state;
6894 insn_num = 0;
6895 /* Shift cycle mark if it is put on insn which could be ignored. */
6896 for (insn = NEXT_INSN (prev_head_insn);
6897 insn != tail;
6898 insn = NEXT_INSN (insn))
6899 if (INSN_P (insn)
6900 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6901 || GET_CODE (PATTERN (insn)) == USE
6902 || GET_CODE (PATTERN (insn)) == CLOBBER)
6903 && GET_MODE (insn) == TImode)
6905 PUT_MODE (insn, VOIDmode);
6906 for (next_insn = NEXT_INSN (insn);
6907 next_insn != tail;
6908 next_insn = NEXT_INSN (next_insn))
6909 if (INSN_P (next_insn)
6910 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6911 && GET_CODE (PATTERN (next_insn)) != USE
6912 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6914 PUT_MODE (next_insn, TImode);
6915 break;
6918 /* Forward pass: generation of bundle states. */
6919 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6920 insn != NULL_RTX;
6921 insn = next_insn)
6923 if (!INSN_P (insn)
6924 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6925 || GET_CODE (PATTERN (insn)) == USE
6926 || GET_CODE (PATTERN (insn)) == CLOBBER)
6927 abort ();
6928 type = ia64_safe_type (insn);
6929 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6930 insn_num++;
6931 index_to_bundle_states [insn_num] = NULL;
6932 for (curr_state = index_to_bundle_states [insn_num - 1];
6933 curr_state != NULL;
6934 curr_state = next_state)
6936 pos = curr_state->accumulated_insns_num % 3;
6937 next_state = curr_state->next;
6938 /* We must fill up the current bundle in order to start a
6939 subsequent asm insn in a new bundle. An asm insn is always
6940 placed in a separate bundle. */
6941 only_bundle_end_p
6942 = (next_insn != NULL_RTX
6943 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6944 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6945 /* We may fill up the current bundle if it is the cycle end
6946 without a group barrier. */
6947 bundle_end_p
6948 = (only_bundle_end_p || next_insn == NULL_RTX
6949 || (GET_MODE (next_insn) == TImode
6950 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6951 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6952 || type == TYPE_S
6953 /* We need to insert 2 nops for cases like M_MII. To
6954 guarantee issuing all insns on the same cycle for
6955 Itanium 1, we need to issue 2 nops after the first M
6956 insn (MnnMII where n is a nop insn). */
6957 || ((type == TYPE_M || type == TYPE_A)
6958 && ia64_tune == PROCESSOR_ITANIUM
6959 && !bundle_end_p && pos == 1))
6960 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6961 only_bundle_end_p);
6962 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6963 only_bundle_end_p);
6964 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6965 only_bundle_end_p);
6967 if (index_to_bundle_states [insn_num] == NULL)
6968 abort ();
6969 for (curr_state = index_to_bundle_states [insn_num];
6970 curr_state != NULL;
6971 curr_state = curr_state->next)
6972 if (verbose >= 2 && dump)
6974 /* This structure is taken from generated code of the
6975 pipeline hazard recognizer (see file insn-attrtab.c).
6976 Please don't forget to change the structure if a new
6977 automaton is added to .md file. */
6978 struct DFA_chip
6980 unsigned short one_automaton_state;
6981 unsigned short oneb_automaton_state;
6982 unsigned short two_automaton_state;
6983 unsigned short twob_automaton_state;
6986 fprintf
6987 (dump,
6988 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6989 curr_state->unique_num,
6990 (curr_state->originator == NULL
6991 ? -1 : curr_state->originator->unique_num),
6992 curr_state->cost,
6993 curr_state->before_nops_num, curr_state->after_nops_num,
6994 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6995 (ia64_tune == PROCESSOR_ITANIUM
6996 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6997 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6998 INSN_UID (insn));
7001 if (index_to_bundle_states [insn_num] == NULL)
7002 /* We should find a solution because the 2nd insn scheduling has
7003 found one. */
7004 abort ();
7005 /* Find a state corresponding to the best insn sequence. */
7006 best_state = NULL;
7007 for (curr_state = index_to_bundle_states [insn_num];
7008 curr_state != NULL;
7009 curr_state = curr_state->next)
7010 /* We are only looking at the states whose last bundle is fully
7011 filled. First we prefer insn sequences with minimal cost,
7012 then with minimal inserted nops, and finally with branch insns
7013 placed in 3rd slots. */
7014 if (curr_state->accumulated_insns_num % 3 == 0
7015 && (best_state == NULL || best_state->cost > curr_state->cost
7016 || (best_state->cost == curr_state->cost
7017 && (curr_state->accumulated_insns_num
7018 < best_state->accumulated_insns_num
7019 || (curr_state->accumulated_insns_num
7020 == best_state->accumulated_insns_num
7021 && curr_state->branch_deviation
7022 < best_state->branch_deviation)))))
7023 best_state = curr_state;
7024 /* Second (backward) pass: adding nops and templates. */
7025 insn_num = best_state->before_nops_num;
7026 template0 = template1 = -1;
7027 for (curr_state = best_state;
7028 curr_state->originator != NULL;
7029 curr_state = curr_state->originator)
7031 insn = curr_state->insn;
7032 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7033 || asm_noperands (PATTERN (insn)) >= 0);
7034 insn_num++;
7035 if (verbose >= 2 && dump)
7037 struct DFA_chip
7039 unsigned short one_automaton_state;
7040 unsigned short oneb_automaton_state;
7041 unsigned short two_automaton_state;
7042 unsigned short twob_automaton_state;
7045 fprintf
7046 (dump,
7047 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7048 curr_state->unique_num,
7049 (curr_state->originator == NULL
7050 ? -1 : curr_state->originator->unique_num),
7051 curr_state->cost,
7052 curr_state->before_nops_num, curr_state->after_nops_num,
7053 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7054 (ia64_tune == PROCESSOR_ITANIUM
7055 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7056 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7057 INSN_UID (insn));
7059 /* Find the position in the current bundle window. The window can
7060 contain at most two bundles. A two-bundle window means that
7061 the processor will make two bundle rotations. */
7062 max_pos = get_max_pos (curr_state->dfa_state);
7063 if (max_pos == 6
7064 /* The following (negative template number) means that the
7065 processor did one bundle rotation. */
7066 || (max_pos == 3 && template0 < 0))
7068 /* We are at the end of the window -- find template(s) for
7069 its bundle(s). */
7070 pos = max_pos;
7071 if (max_pos == 3)
7072 template0 = get_template (curr_state->dfa_state, 3);
7073 else
7075 template1 = get_template (curr_state->dfa_state, 3);
7076 template0 = get_template (curr_state->dfa_state, 6);
7079 if (max_pos > 3 && template1 < 0)
7080 /* This may happen when we have a stop inside a bundle. */
7082 if (pos > 3)
7083 abort ();
7084 template1 = get_template (curr_state->dfa_state, 3);
7085 pos += 3;
7087 if (!asm_p)
7088 /* Emit nops after the current insn. */
7089 for (i = 0; i < curr_state->after_nops_num; i++)
7091 nop = gen_nop ();
7092 emit_insn_after (nop, insn);
7093 pos--;
7094 if (pos < 0)
7095 abort ();
7096 if (pos % 3 == 0)
7098 /* We are at the start of a bundle: emit the template
7099 (it should be defined). */
7100 if (template0 < 0)
7101 abort ();
7102 b = gen_bundle_selector (GEN_INT (template0));
7103 ia64_emit_insn_before (b, nop);
7104 /* If we have a two-bundle window, we make one bundle
7105 rotation. Otherwise template0 will be undefined
7106 (negative value). */
7107 template0 = template1;
7108 template1 = -1;
7111 /* Move the position backward in the window. A group barrier has
7112 no slot. An asm insn takes a whole bundle. */
7113 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7114 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7115 && asm_noperands (PATTERN (insn)) < 0)
7116 pos--;
7117 /* Long insn takes 2 slots. */
7118 if (ia64_safe_type (insn) == TYPE_L)
7119 pos--;
7120 if (pos < 0)
7121 abort ();
7122 if (pos % 3 == 0
7123 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7124 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7125 && asm_noperands (PATTERN (insn)) < 0)
7127 /* The current insn is at the bundle start: emit the
7128 template. */
7129 if (template0 < 0)
7130 abort ();
7131 b = gen_bundle_selector (GEN_INT (template0));
7132 ia64_emit_insn_before (b, insn);
7133 b = PREV_INSN (insn);
7134 insn = b;
7135 /* See comment above in analogous place for emitting nops
7136 after the insn. */
7137 template0 = template1;
7138 template1 = -1;
7140 /* Emit nops before the current insn. */
7141 for (i = 0; i < curr_state->before_nops_num; i++)
7143 nop = gen_nop ();
7144 ia64_emit_insn_before (nop, insn);
7145 nop = PREV_INSN (insn);
7146 insn = nop;
7147 pos--;
7148 if (pos < 0)
7149 abort ();
7150 if (pos % 3 == 0)
7152 /* See comment above in analogous place for emitting nops
7153 after the insn. */
7154 if (template0 < 0)
7155 abort ();
7156 b = gen_bundle_selector (GEN_INT (template0));
7157 ia64_emit_insn_before (b, insn);
7158 b = PREV_INSN (insn);
7159 insn = b;
7160 template0 = template1;
7161 template1 = -1;
7165 if (ia64_tune == PROCESSOR_ITANIUM)
7166 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
7167 Itanium1 has a quirk: if the distance between an insn
7168 and a dependent MM-insn is less than 4 cycles, we get an additional
7169 6-cycle stall. So we make the distance equal to 4 cycles if it
7170 is less. */
7171 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7172 insn != NULL_RTX;
7173 insn = next_insn)
7175 if (!INSN_P (insn)
7176 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7177 || GET_CODE (PATTERN (insn)) == USE
7178 || GET_CODE (PATTERN (insn)) == CLOBBER)
7179 abort ();
7180 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7181 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
7182 /* We found an MM-insn which needs additional cycles. */
7184 rtx last;
7185 int i, j, n;
7186 int pred_stop_p;
7188 /* Now we search for the template of the bundle in
7189 which the MM-insn is placed and for the position of the
7190 insn in the bundle (0, 1, 2). We also check
7191 whether there is a stop before the insn. */
7192 last = prev_active_insn (insn);
7193 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
7194 if (pred_stop_p)
7195 last = prev_active_insn (last);
7196 n = 0;
7197 for (;; last = prev_active_insn (last))
7198 if (recog_memoized (last) == CODE_FOR_bundle_selector)
7200 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
7201 if (template0 == 9)
7202 /* The insn is in an MLX bundle. Change the template
7203 to MFI because we will add nops before the
7204 insn. This simplifies the subsequent code a lot. */
7205 PATTERN (last)
7206 = gen_bundle_selector (const2_rtx); /* -> MFI */
7207 break;
7209 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7210 n++;
7211 /* Sanity checks: the stop is not at the
7212 bundle start, there are no more than 3 insns in the bundle,
7213 and the MM-insn is not at the start of a bundle with
7214 template MLX. */
7215 if ((pred_stop_p && n == 0) || n > 2
7216 || (template0 == 9 && n != 0))
7217 abort ();
7218 /* Put nops after the insn in the bundle. */
7219 for (j = 3 - n; j > 0; j --)
7220 ia64_emit_insn_before (gen_nop (), insn);
7221 /* This takes into account that we will add N more nops
7222 before the insn later -- please see the code below. */
7223 add_cycles [INSN_UID (insn)]--;
7224 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
7225 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7226 insn);
7227 if (pred_stop_p)
7228 add_cycles [INSN_UID (insn)]--;
7229 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7231 /* Insert "MII;" template. */
7232 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7233 insn);
7234 ia64_emit_insn_before (gen_nop (), insn);
7235 ia64_emit_insn_before (gen_nop (), insn);
7236 if (i > 1)
7238 /* To decrease code size, we use "MI;I;"
7239 template. */
7240 ia64_emit_insn_before
7241 (gen_insn_group_barrier (GEN_INT (3)), insn);
7242 i--;
7244 ia64_emit_insn_before (gen_nop (), insn);
7245 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7246 insn);
7248 /* Put the MM-insn in the same slot of a bundle with the
7249 same template as the original one. */
7250 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
7251 insn);
7252 /* To put the insn in the same slot, add necessary number
7253 of nops. */
7254 for (j = n; j > 0; j --)
7255 ia64_emit_insn_before (gen_nop (), insn);
7256 /* Put the stop if the original bundle had it. */
7257 if (pred_stop_p)
7258 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7259 insn);
7262 free (index_to_bundle_states);
7263 finish_bundle_state_table ();
7264 bundling_p = 0;
7265 dfa_clean_insn_cache ();
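/* Illustrative sketch (not part of GCC, kept out of the build with #if 0):
   the forward/backward shape of the bundling algorithm described in the
   comment before bundling () above, on a toy problem.  The forward pass
   builds states with an "originator" backpointer and keeps only the
   cheapest candidates; the backward pass walks the originators of the best
   final state, which is how bundling () recovers nop counts and templates.
   The real algorithm keys states by DFA state and merges duplicates in a
   hash table; here each step simply keeps one state per nop choice, and
   the cost model is invented.  Compile separately to run.  */
#if 0
#include <stdio.h>

#define N_STEPS 4
#define N_CHOICES 3	/* 0, 1 or 2 nops before each insn */

struct toy_state
{
  int cost;
  int nops;			/* the decision taken at this step */
  struct toy_state *originator;	/* best state of the previous step */
};

/* Invented cost: each nop costs 1, and step I "dislikes" using
   I % 3 nops, which costs an extra 2.  */
static int
step_cost (int step, int nops)
{
  return nops + (nops == step % 3 ? 2 : 0);
}

int
main (void)
{
  static struct toy_state states[N_STEPS + 1][N_CHOICES];
  struct toy_state start = { 0, 0, 0 }, *best;
  int step, nops, i;

  /* Forward pass: for each step build one state per nop choice, linked
     back to the cheapest state of the previous step.  */
  for (step = 1; step <= N_STEPS; step++)
    for (nops = 0; nops < N_CHOICES; nops++)
      {
	struct toy_state *prev = &start, *s = &states[step][nops];

	if (step > 1)
	  {
	    prev = &states[step - 1][0];
	    for (i = 1; i < N_CHOICES; i++)
	      if (states[step - 1][i].cost < prev->cost)
		prev = &states[step - 1][i];
	  }
	s->cost = prev->cost + step_cost (step, nops);
	s->nops = nops;
	s->originator = prev;
      }

  /* Pick the best final state.  */
  best = &states[N_STEPS][0];
  for (i = 1; i < N_CHOICES; i++)
    if (states[N_STEPS][i].cost < best->cost)
      best = &states[N_STEPS][i];

  /* Backward pass: walk originators to recover the decisions.  */
  printf ("total cost %d, nops per step (last to first):", best->cost);
  for (; best->originator != NULL; best = best->originator)
    printf (" %d", best->nops);
  printf ("\n");
  return 0;
}
#endif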
7268 /* The following function is called at the end of scheduling BB or
7269 EBB. After reload, it inserts stop bits and does insn bundling. */
7271 static void
7272 ia64_sched_finish (FILE *dump, int sched_verbose)
7274 if (sched_verbose)
7275 fprintf (dump, "// Finishing schedule.\n");
7276 if (!reload_completed)
7277 return;
7278 if (reload_completed)
7280 final_emit_insn_group_barriers (dump);
7281 bundling (dump, sched_verbose, current_sched_info->prev_head,
7282 current_sched_info->next_tail);
7283 if (sched_verbose && dump)
7284 fprintf (dump, "// finishing %d-%d\n",
7285 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7286 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7288 return;
7292 /* The following function inserts stop bits in scheduled BB or EBB. */
7294 static void
7295 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7297 rtx insn;
7298 int need_barrier_p = 0;
7299 rtx prev_insn = NULL_RTX;
7301 init_insn_group_barriers ();
7303 for (insn = NEXT_INSN (current_sched_info->prev_head);
7304 insn != current_sched_info->next_tail;
7305 insn = NEXT_INSN (insn))
7307 if (GET_CODE (insn) == BARRIER)
7309 rtx last = prev_active_insn (insn);
7311 if (! last)
7312 continue;
7313 if (GET_CODE (last) == JUMP_INSN
7314 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7315 last = prev_active_insn (last);
7316 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7317 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7319 init_insn_group_barriers ();
7320 need_barrier_p = 0;
7321 prev_insn = NULL_RTX;
7323 else if (INSN_P (insn))
7325 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7327 init_insn_group_barriers ();
7328 need_barrier_p = 0;
7329 prev_insn = NULL_RTX;
7331 else if (need_barrier_p || group_barrier_needed_p (insn))
7333 if (TARGET_EARLY_STOP_BITS)
7335 rtx last;
7337 for (last = insn;
7338 last != current_sched_info->prev_head;
7339 last = PREV_INSN (last))
7340 if (INSN_P (last) && GET_MODE (last) == TImode
7341 && stops_p [INSN_UID (last)])
7342 break;
7343 if (last == current_sched_info->prev_head)
7344 last = insn;
7345 last = prev_active_insn (last);
7346 if (last
7347 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7348 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7349 last);
7350 init_insn_group_barriers ();
7351 for (last = NEXT_INSN (last);
7352 last != insn;
7353 last = NEXT_INSN (last))
7354 if (INSN_P (last))
7355 group_barrier_needed_p (last);
7357 else
7359 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7360 insn);
7361 init_insn_group_barriers ();
7363 group_barrier_needed_p (insn);
7364 prev_insn = NULL_RTX;
7366 else if (recog_memoized (insn) >= 0)
7367 prev_insn = insn;
7368 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7369 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7370 || asm_noperands (PATTERN (insn)) >= 0);
7377 /* If the following function returns TRUE, we will use the DFA
7378 insn scheduler. */
7380 static int
7381 ia64_use_dfa_pipeline_interface (void)
7383 return 1;
7386 /* The following function returns the number of ready insns the DFA
7387 insn scheduler may examine when choosing an insn on the first cycle. */
7389 static int
7390 ia64_first_cycle_multipass_dfa_lookahead (void)
7392 return (reload_completed ? 6 : 4);
7395 /* The following function initiates variable `dfa_pre_cycle_insn'. */
7397 static void
7398 ia64_init_dfa_pre_cycle_insn (void)
7400 if (temp_dfa_state == NULL)
7402 dfa_state_size = state_size ();
7403 temp_dfa_state = xmalloc (dfa_state_size);
7404 prev_cycle_state = xmalloc (dfa_state_size);
7406 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7407 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7408 recog_memoized (dfa_pre_cycle_insn);
7409 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7410 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7411 recog_memoized (dfa_stop_insn);
7414 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7415 used by the DFA insn scheduler. */
7417 static rtx
7418 ia64_dfa_pre_cycle_insn (void)
7420 return dfa_pre_cycle_insn;
7423 /* The following function returns TRUE if PRODUCER (of type ilog or
7424 ld) produces address for CONSUMER (of type st or stf). */
7427 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7429 rtx dest, reg, mem;
7431 if (producer == NULL_RTX || consumer == NULL_RTX)
7432 abort ();
7433 dest = ia64_single_set (producer);
7434 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7435 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7436 abort ();
7437 if (GET_CODE (reg) == SUBREG)
7438 reg = SUBREG_REG (reg);
7439 dest = ia64_single_set (consumer);
7440 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
7441 || GET_CODE (mem) != MEM)
7442 abort ();
7443 return reg_mentioned_p (reg, mem);
7446 /* The following function returns TRUE if PRODUCER (of type ilog or
7447 ld) produces address for CONSUMER (of type ld or fld). */
7450 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7452 rtx dest, src, reg, mem;
7454 if (producer == NULL_RTX || consumer == NULL_RTX)
7455 abort ();
7456 dest = ia64_single_set (producer);
7457 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7458 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7459 abort ();
7460 if (GET_CODE (reg) == SUBREG)
7461 reg = SUBREG_REG (reg);
7462 src = ia64_single_set (consumer);
7463 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
7464 abort ();
7465 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7466 mem = XVECEXP (mem, 0, 0);
7467 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7468 mem = XEXP (mem, 0);
7470 /* Note that LO_SUM is used for GOT loads. */
7471 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
7472 abort ();
7474 return reg_mentioned_p (reg, mem);
7477 /* The following function returns TRUE if INSN produces an address for a
7478 load/store insn. We will place such insns into an M slot because that
7479 decreases their latency time. */
7482 ia64_produce_address_p (rtx insn)
7484 return insn->call;
7488 /* Emit pseudo-ops for the assembler to describe predicate relations.
7489 At present this assumes that we only consider predicate pairs to
7490 be mutex, and that the assembler can deduce proper values from
7491 straight-line code. */
7493 static void
7494 emit_predicate_relation_info (void)
7496 basic_block bb;
7498 FOR_EACH_BB_REVERSE (bb)
7500 int r;
7501 rtx head = BB_HEAD (bb);
7503 /* We only need such notes at code labels. */
7504 if (GET_CODE (head) != CODE_LABEL)
7505 continue;
7506 if (GET_CODE (NEXT_INSN (head)) == NOTE
7507 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7508 head = NEXT_INSN (head);
7510 for (r = PR_REG (0); r < PR_REG (64); r += 2)
7511 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
7513 rtx p = gen_rtx_REG (BImode, r);
7514 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7515 if (head == BB_END (bb))
7516 BB_END (bb) = n;
7517 head = n;
7521 /* Look for conditional calls that do not return, and protect predicate
7522 relations around them. Otherwise the assembler will assume the call
7523 returns, and complain about uses of call-clobbered predicates after
7524 the call. */
7525 FOR_EACH_BB_REVERSE (bb)
7527 rtx insn = BB_HEAD (bb);
7529 while (1)
7531 if (GET_CODE (insn) == CALL_INSN
7532 && GET_CODE (PATTERN (insn)) == COND_EXEC
7533 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7535 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7536 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7537 if (BB_HEAD (bb) == insn)
7538 BB_HEAD (bb) = b;
7539 if (BB_END (bb) == insn)
7540 BB_END (bb) = a;
7543 if (insn == BB_END (bb))
7544 break;
7545 insn = NEXT_INSN (insn);
7550 /* Perform machine dependent operations on the rtl chain INSNS. */
7552 static void
7553 ia64_reorg (void)
7555 /* We are freeing block_for_insn in the toplev to keep compatibility
7556 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7557 compute_bb_for_insn ();
7559 /* If optimizing, we'll have split before scheduling. */
7560 if (optimize == 0)
7561 split_all_insns (0);
7563 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7564 non-optimizing bootstrap. */
7565 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7567 if (ia64_flag_schedule_insns2)
7569 timevar_push (TV_SCHED2);
7570 ia64_final_schedule = 1;
7572 initiate_bundle_states ();
7573 ia64_nop = make_insn_raw (gen_nop ());
7574 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7575 recog_memoized (ia64_nop);
7576 clocks_length = get_max_uid () + 1;
7577 stops_p = xcalloc (1, clocks_length);
7578 if (ia64_tune == PROCESSOR_ITANIUM)
7580 clocks = xcalloc (clocks_length, sizeof (int));
7581 add_cycles = xcalloc (clocks_length, sizeof (int));
7583 if (ia64_tune == PROCESSOR_ITANIUM2)
7585 pos_1 = get_cpu_unit_code ("2_1");
7586 pos_2 = get_cpu_unit_code ("2_2");
7587 pos_3 = get_cpu_unit_code ("2_3");
7588 pos_4 = get_cpu_unit_code ("2_4");
7589 pos_5 = get_cpu_unit_code ("2_5");
7590 pos_6 = get_cpu_unit_code ("2_6");
7591 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7592 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7593 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7594 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7595 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7596 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7597 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7598 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7599 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7600 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7601 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7602 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7603 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7604 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7605 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7606 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7607 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7608 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7609 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7610 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7612 else
7614 pos_1 = get_cpu_unit_code ("1_1");
7615 pos_2 = get_cpu_unit_code ("1_2");
7616 pos_3 = get_cpu_unit_code ("1_3");
7617 pos_4 = get_cpu_unit_code ("1_4");
7618 pos_5 = get_cpu_unit_code ("1_5");
7619 pos_6 = get_cpu_unit_code ("1_6");
7620 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7621 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7622 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7623 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7624 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7625 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7626 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7627 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7628 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7629 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7630 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7631 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7632 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7633 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7634 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7635 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7636 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7637 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7638 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7639 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7641 schedule_ebbs (dump_file);
7642 finish_bundle_states ();
7643 if (ia64_tune == PROCESSOR_ITANIUM)
7645 free (add_cycles);
7646 free (clocks);
7648 free (stops_p);
7649 emit_insn_group_barriers (dump_file);
7651 ia64_final_schedule = 0;
7652 timevar_pop (TV_SCHED2);
7654 else
7655 emit_all_insn_group_barriers (dump_file);
7657 /* A call must not be the last instruction in a function, so that the
7658 return address stays within the function and unwinding works
7659 properly. Note that IA-64 differs from dwarf2 on this point. */
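/* Concretely, the code below appends a stop bit and a break insn
   after such a trailing call (roughly ";; break.f 0 ;;" -- the exact
   spelling comes from the machine description), so the return address
   no longer points past the end of the function.  */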
7660 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7662 rtx insn;
7663 int saw_stop = 0;
7665 insn = get_last_insn ();
7666 if (! INSN_P (insn))
7667 insn = prev_active_insn (insn);
7668 if (GET_CODE (insn) == INSN
7669 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7670 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7672 saw_stop = 1;
7673 insn = prev_active_insn (insn);
7675 if (GET_CODE (insn) == CALL_INSN)
7677 if (! saw_stop)
7678 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7679 emit_insn (gen_break_f ());
7680 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7684 fixup_errata ();
7685 emit_predicate_relation_info ();
7687 if (ia64_flag_var_tracking)
7689 timevar_push (TV_VAR_TRACKING);
7690 variable_tracking_main ();
7691 timevar_pop (TV_VAR_TRACKING);
7695 /* Return true if REGNO is used by the epilogue. */
7698 ia64_epilogue_uses (int regno)
7700 switch (regno)
7702 case R_GR (1):
7703 /* With a call to a function in another module, we will write a new
7704 value to "gp". After returning from such a call, we need to make
7705 sure the function restores the original gp-value, even if the
7706 function itself does not use the gp anymore. */
7707 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7709 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7710 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7711 /* For functions defined with the syscall_linkage attribute, all
7712 input registers are marked as live at all function exits. This
7713 prevents the register allocator from using the input registers,
7714 which in turn makes it possible to restart a system call after
7715 an interrupt without having to save/restore the input registers.
7716 This also prevents kernel data from leaking to application code. */
7717 return lookup_attribute ("syscall_linkage",
7718 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7720 case R_BR (0):
7721 /* Conditional return patterns can't represent the use of `b0' as
7722 the return address, so we force the value live this way. */
7723 return 1;
7725 case AR_PFS_REGNUM:
7726 /* Likewise for ar.pfs, which is used by br.ret. */
7727 return 1;
7729 default:
7730 return 0;
7734 /* Return true if REGNO is used by the frame unwinder. */
7737 ia64_eh_uses (int regno)
7739 if (! reload_completed)
7740 return 0;
7742 if (current_frame_info.reg_save_b0
7743 && regno == current_frame_info.reg_save_b0)
7744 return 1;
7745 if (current_frame_info.reg_save_pr
7746 && regno == current_frame_info.reg_save_pr)
7747 return 1;
7748 if (current_frame_info.reg_save_ar_pfs
7749 && regno == current_frame_info.reg_save_ar_pfs)
7750 return 1;
7751 if (current_frame_info.reg_save_ar_unat
7752 && regno == current_frame_info.reg_save_ar_unat)
7753 return 1;
7754 if (current_frame_info.reg_save_ar_lc
7755 && regno == current_frame_info.reg_save_ar_lc)
7756 return 1;
7758 return 0;
7761 /* Return true if this goes in small data/bss. */
7763 /* ??? We could also support our own long data here, generating
7764 movl/add/ld8 instead of addl,ld8/ld8. That makes the code bigger, but
7765 should make it faster because there is one less load. This would also
7766 cover incomplete types, which can't go in sdata/sbss. */
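/* For illustration (assumed assembler output, simplified): a small-data
   object is reachable gp-relative in a single load,

        addl  r14 = @gprel(var), gp
        ;;
        ld8   r15 = [r14]

   whereas an ordinary object is reached through the linkage table,
   i.e. the addl,ld8/ld8 sequence mentioned above:

        addl  r14 = @ltoff(var), gp
        ;;
        ld8   r14 = [r14]
        ;;
        ld8   r15 = [r14]
 */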
7768 static bool
7769 ia64_in_small_data_p (tree exp)
7771 if (TARGET_NO_SDATA)
7772 return false;
7774 /* We want to merge strings, so we never consider them small data. */
7775 if (TREE_CODE (exp) == STRING_CST)
7776 return false;
7778 /* Functions are never small data. */
7779 if (TREE_CODE (exp) == FUNCTION_DECL)
7780 return false;
7782 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7784 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7785 if (strcmp (section, ".sdata") == 0
7786 || strcmp (section, ".sbss") == 0)
7787 return true;
7789 else
7791 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7793 /* If this is an incomplete type with size 0, then we can't put it
7794 in sdata because it might be too big when completed. */
7795 if (size > 0 && size <= ia64_section_threshold)
7796 return true;
7799 return false;
7802 /* Output assembly directives for prologue regions. */
7804 /* True if the current basic block is the last block of the function. */
7806 static bool last_block;
7808 /* True if we need a copy_state command at the start of the next block. */
7810 static bool need_copy_state;
7812 /* The function emits unwind directives for the start of an epilogue. */
7814 static void
7815 process_epilogue (void)
7817 /* If this isn't the last block of the function, then we need to label the
7818 current state, and copy it back in at the start of the next block. */
7820 if (!last_block)
7822 fprintf (asm_out_file, "\t.label_state 1\n");
7823 need_copy_state = true;
7826 fprintf (asm_out_file, "\t.restore sp\n");
7829 /* This function processes a SET pattern, looking for the specific forms
7830 that require an assembly directive to be emitted for unwinding. */
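/* For example (a sketch of the mapping implemented below): a
   frame-related "sp = sp - 32" yields ".fframe 32", copying b0 into
   its save register yields ".save rp, rNN", and storing ar.lc at an
   sp-relative offset yields ".savesp ar.lc, OFF".  */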
7832 static int
7833 process_set (FILE *asm_out_file, rtx pat)
7835 rtx src = SET_SRC (pat);
7836 rtx dest = SET_DEST (pat);
7837 int src_regno, dest_regno;
7839 /* Look for the ALLOC insn. */
7840 if (GET_CODE (src) == UNSPEC_VOLATILE
7841 && XINT (src, 1) == UNSPECV_ALLOC
7842 && GET_CODE (dest) == REG)
7844 dest_regno = REGNO (dest);
7846 /* If this isn't the final destination for ar.pfs, the alloc
7847 shouldn't have been marked frame related. */
7848 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7849 abort ();
7851 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7852 ia64_dbx_register_number (dest_regno));
7853 return 1;
7856 /* Look for SP = .... */
7857 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7859 if (GET_CODE (src) == PLUS)
7861 rtx op0 = XEXP (src, 0);
7862 rtx op1 = XEXP (src, 1);
7863 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7865 if (INTVAL (op1) < 0)
7866 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7867 -INTVAL (op1));
7868 else
7869 process_epilogue ();
7871 else
7872 abort ();
7874 else if (GET_CODE (src) == REG
7875 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7876 process_epilogue ();
7877 else
7878 abort ();
7880 return 1;
7883 /* Register move we need to look at. */
7884 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7886 src_regno = REGNO (src);
7887 dest_regno = REGNO (dest);
7889 switch (src_regno)
7891 case BR_REG (0):
7892 /* Saving return address pointer. */
7893 if (dest_regno != current_frame_info.reg_save_b0)
7894 abort ();
7895 fprintf (asm_out_file, "\t.save rp, r%d\n",
7896 ia64_dbx_register_number (dest_regno));
7897 return 1;
7899 case PR_REG (0):
7900 if (dest_regno != current_frame_info.reg_save_pr)
7901 abort ();
7902 fprintf (asm_out_file, "\t.save pr, r%d\n",
7903 ia64_dbx_register_number (dest_regno));
7904 return 1;
7906 case AR_UNAT_REGNUM:
7907 if (dest_regno != current_frame_info.reg_save_ar_unat)
7908 abort ();
7909 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7910 ia64_dbx_register_number (dest_regno));
7911 return 1;
7913 case AR_LC_REGNUM:
7914 if (dest_regno != current_frame_info.reg_save_ar_lc)
7915 abort ();
7916 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7917 ia64_dbx_register_number (dest_regno));
7918 return 1;
7920 case STACK_POINTER_REGNUM:
7921 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7922 || ! frame_pointer_needed)
7923 abort ();
7924 fprintf (asm_out_file, "\t.vframe r%d\n",
7925 ia64_dbx_register_number (dest_regno));
7926 return 1;
7928 default:
7929 /* Everything else should indicate being stored to memory. */
7930 abort ();
7934 /* Memory store we need to look at. */
7935 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7937 long off;
7938 rtx base;
7939 const char *saveop;
7941 if (GET_CODE (XEXP (dest, 0)) == REG)
7943 base = XEXP (dest, 0);
7944 off = 0;
7946 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7947 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7949 base = XEXP (XEXP (dest, 0), 0);
7950 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7952 else
7953 abort ();
7955 if (base == hard_frame_pointer_rtx)
7957 saveop = ".savepsp";
7958 off = - off;
7960 else if (base == stack_pointer_rtx)
7961 saveop = ".savesp";
7962 else
7963 abort ();
7965 src_regno = REGNO (src);
7966 switch (src_regno)
7968 case BR_REG (0):
7969 if (current_frame_info.reg_save_b0 != 0)
7970 abort ();
7971 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7972 return 1;
7974 case PR_REG (0):
7975 if (current_frame_info.reg_save_pr != 0)
7976 abort ();
7977 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7978 return 1;
7980 case AR_LC_REGNUM:
7981 if (current_frame_info.reg_save_ar_lc != 0)
7982 abort ();
7983 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7984 return 1;
7986 case AR_PFS_REGNUM:
7987 if (current_frame_info.reg_save_ar_pfs != 0)
7988 abort ();
7989 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7990 return 1;
7992 case AR_UNAT_REGNUM:
7993 if (current_frame_info.reg_save_ar_unat != 0)
7994 abort ();
7995 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7996 return 1;
7998 case GR_REG (4):
7999 case GR_REG (5):
8000 case GR_REG (6):
8001 case GR_REG (7):
8002 fprintf (asm_out_file, "\t.save.g 0x%x\n",
8003 1 << (src_regno - GR_REG (4)));
8004 return 1;
8006 case BR_REG (1):
8007 case BR_REG (2):
8008 case BR_REG (3):
8009 case BR_REG (4):
8010 case BR_REG (5):
8011 fprintf (asm_out_file, "\t.save.b 0x%x\n",
8012 1 << (src_regno - BR_REG (1)));
8013 return 1;
8015 case FR_REG (2):
8016 case FR_REG (3):
8017 case FR_REG (4):
8018 case FR_REG (5):
8019 fprintf (asm_out_file, "\t.save.f 0x%x\n",
8020 1 << (src_regno - FR_REG (2)));
8021 return 1;
8023 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
8024 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
8025 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
8026 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
8027 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
8028 1 << (src_regno - FR_REG (12)));
8029 return 1;
8031 default:
8032 return 0;
8036 return 0;
8040 /* This function looks at a single insn and emits any directives
8041 required to unwind this insn. */
8042 void
8043 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
8045 if (flag_unwind_tables
8046 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8048 rtx pat;
8050 if (GET_CODE (insn) == NOTE
8051 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
8053 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
8055 /* Restore unwind state from immediately before the epilogue. */
8056 if (need_copy_state)
8058 fprintf (asm_out_file, "\t.body\n");
8059 fprintf (asm_out_file, "\t.copy_state 1\n");
8060 need_copy_state = false;
8064 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
8065 return;
8067 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8068 if (pat)
8069 pat = XEXP (pat, 0);
8070 else
8071 pat = PATTERN (insn);
8073 switch (GET_CODE (pat))
8075 case SET:
8076 process_set (asm_out_file, pat);
8077 break;
8079 case PARALLEL:
8081 int par_index;
8082 int limit = XVECLEN (pat, 0);
8083 for (par_index = 0; par_index < limit; par_index++)
8085 rtx x = XVECEXP (pat, 0, par_index);
8086 if (GET_CODE (x) == SET)
8087 process_set (asm_out_file, x);
8089 break;
8092 default:
8093 abort ();
8099 void
8100 ia64_init_builtins (void)
8102 tree psi_type_node = build_pointer_type (integer_type_node);
8103 tree pdi_type_node = build_pointer_type (long_integer_type_node);
8105 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
8106 tree si_ftype_psi_si_si
8107 = build_function_type_list (integer_type_node,
8108 psi_type_node, integer_type_node,
8109 integer_type_node, NULL_TREE);
8111 /* __sync_val_compare_and_swap_di */
8112 tree di_ftype_pdi_di_di
8113 = build_function_type_list (long_integer_type_node,
8114 pdi_type_node, long_integer_type_node,
8115 long_integer_type_node, NULL_TREE);
8116 /* __sync_bool_compare_and_swap_di */
8117 tree si_ftype_pdi_di_di
8118 = build_function_type_list (integer_type_node,
8119 pdi_type_node, long_integer_type_node,
8120 long_integer_type_node, NULL_TREE);
8121 /* __sync_synchronize */
8122 tree void_ftype_void
8123 = build_function_type (void_type_node, void_list_node);
8125 /* __sync_lock_test_and_set_si */
8126 tree si_ftype_psi_si
8127 = build_function_type_list (integer_type_node,
8128 psi_type_node, integer_type_node, NULL_TREE);
8130 /* __sync_lock_test_and_set_di */
8131 tree di_ftype_pdi_di
8132 = build_function_type_list (long_integer_type_node,
8133 pdi_type_node, long_integer_type_node,
8134 NULL_TREE);
8136 /* __sync_lock_release_si */
8137 tree void_ftype_psi
8138 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
8140 /* __sync_lock_release_di */
8141 tree void_ftype_pdi
8142 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
8144 tree fpreg_type;
8145 tree float80_type;
8147 /* The __fpreg type. */
8148 fpreg_type = make_node (REAL_TYPE);
8149 /* ??? The back end should know to load/save __fpreg variables using
8150 the ldf.fill and stf.spill instructions. */
8151 TYPE_PRECISION (fpreg_type) = 96;
8152 layout_type (fpreg_type);
8153 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
8155 /* The __float80 type. */
8156 float80_type = make_node (REAL_TYPE);
8157 TYPE_PRECISION (float80_type) = 96;
8158 layout_type (float80_type);
8159 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
8161 /* The __float128 type. */
8162 if (!TARGET_HPUX)
8164 tree float128_type = make_node (REAL_TYPE);
8165 TYPE_PRECISION (float128_type) = 128;
8166 layout_type (float128_type);
8167 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
8169 else
8170 /* Under HPUX, this is a synonym for "long double". */
8171 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
8172 "__float128");
8174 #define def_builtin(name, type, code) \
8175 builtin_function ((name), (type), (code), BUILT_IN_MD, NULL, NULL_TREE)
8177 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
8178 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
8179 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
8180 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
8181 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
8182 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
8183 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
8184 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
8186 def_builtin ("__sync_synchronize", void_ftype_void,
8187 IA64_BUILTIN_SYNCHRONIZE);
8189 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
8190 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
8191 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
8192 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
8193 def_builtin ("__sync_lock_release_si", void_ftype_psi,
8194 IA64_BUILTIN_LOCK_RELEASE_SI);
8195 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
8196 IA64_BUILTIN_LOCK_RELEASE_DI);
8198 def_builtin ("__builtin_ia64_bsp",
8199 build_function_type (ptr_type_node, void_list_node),
8200 IA64_BUILTIN_BSP);
8202 def_builtin ("__builtin_ia64_flushrs",
8203 build_function_type (void_type_node, void_list_node),
8204 IA64_BUILTIN_FLUSHRS);
8206 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
8207 IA64_BUILTIN_FETCH_AND_ADD_SI);
8208 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
8209 IA64_BUILTIN_FETCH_AND_SUB_SI);
8210 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
8211 IA64_BUILTIN_FETCH_AND_OR_SI);
8212 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
8213 IA64_BUILTIN_FETCH_AND_AND_SI);
8214 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
8215 IA64_BUILTIN_FETCH_AND_XOR_SI);
8216 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
8217 IA64_BUILTIN_FETCH_AND_NAND_SI);
8219 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
8220 IA64_BUILTIN_ADD_AND_FETCH_SI);
8221 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
8222 IA64_BUILTIN_SUB_AND_FETCH_SI);
8223 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
8224 IA64_BUILTIN_OR_AND_FETCH_SI);
8225 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
8226 IA64_BUILTIN_AND_AND_FETCH_SI);
8227 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
8228 IA64_BUILTIN_XOR_AND_FETCH_SI);
8229 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
8230 IA64_BUILTIN_NAND_AND_FETCH_SI);
8232 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
8233 IA64_BUILTIN_FETCH_AND_ADD_DI);
8234 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
8235 IA64_BUILTIN_FETCH_AND_SUB_DI);
8236 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
8237 IA64_BUILTIN_FETCH_AND_OR_DI);
8238 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
8239 IA64_BUILTIN_FETCH_AND_AND_DI);
8240 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
8241 IA64_BUILTIN_FETCH_AND_XOR_DI);
8242 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
8243 IA64_BUILTIN_FETCH_AND_NAND_DI);
8245 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
8246 IA64_BUILTIN_ADD_AND_FETCH_DI);
8247 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
8248 IA64_BUILTIN_SUB_AND_FETCH_DI);
8249 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
8250 IA64_BUILTIN_OR_AND_FETCH_DI);
8251 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
8252 IA64_BUILTIN_AND_AND_FETCH_DI);
8253 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
8254 IA64_BUILTIN_XOR_AND_FETCH_DI);
8255 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
8256 IA64_BUILTIN_NAND_AND_FETCH_DI);
8258 #undef def_builtin
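/* Illustrative use of the builtins defined above from C code (a
   sketch, not part of the compiler):

        static int lock;

        void acquire (void)
        {
          while (__sync_lock_test_and_set_si (&lock, 1) != 0)
            continue;
        }

        void release (void)
        {
          __sync_lock_release_si (&lock);
        }
 */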
8261 /* Expand fetch_and_op intrinsics. The basic code sequence is:
8264 tmp = [ptr];
8265 do {
8266 ret = tmp;
8267 ar.ccv = tmp;
8268 tmp <op>= value;
8269 cmpxchgsz.acq tmp = [ptr], tmp
8270 } while (tmp != ret)
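/* When the operation is an add and the addend is one of the immediates
   accepted by fetchadd (-16, -8, -4, -1, 1, 4, 8, 16), the loop is not
   needed; a single fetchadd4.acq (or fetchadd8.acq for DImode) is
   emitted instead -- see the fetchadd special case below.  */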
8273 static rtx
8274 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
8275 tree arglist, rtx target)
8277 rtx ret, label, tmp, ccv, insn, mem, value;
8278 tree arg0, arg1;
8280 arg0 = TREE_VALUE (arglist);
8281 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8282 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8283 #ifdef POINTERS_EXTEND_UNSIGNED
8284 if (GET_MODE(mem) != Pmode)
8285 mem = convert_memory_address (Pmode, mem);
8286 #endif
8287 value = expand_expr (arg1, NULL_RTX, mode, 0);
8289 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8290 MEM_VOLATILE_P (mem) = 1;
8292 if (target && register_operand (target, mode))
8293 ret = target;
8294 else
8295 ret = gen_reg_rtx (mode);
8297 emit_insn (gen_mf ());
8299 /* Special case for fetchadd instructions. */
8300 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
8302 if (mode == SImode)
8303 insn = gen_fetchadd_acq_si (ret, mem, value);
8304 else
8305 insn = gen_fetchadd_acq_di (ret, mem, value);
8306 emit_insn (insn);
8307 return ret;
8310 tmp = gen_reg_rtx (mode);
8311 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8312 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8313 emit_move_insn (tmp, mem);
8315 label = gen_label_rtx ();
8316 emit_label (label);
8317 emit_move_insn (ret, tmp);
8318 convert_move (ccv, tmp, /*unsignedp=*/1);
8320 /* Perform the specific operation. Special case NAND by noticing
8321 one_cmpl_optab instead. */
8322 if (binoptab == one_cmpl_optab)
8324 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8325 binoptab = and_optab;
8327 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
8329 if (mode == SImode)
8330 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
8331 else
8332 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
8333 emit_insn (insn);
8335 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
8337 return ret;
8340 /* Expand op_and_fetch intrinsics. The basic code sequence is:
8343 tmp = [ptr];
8344 do {
8345 old = tmp;
8346 ar.ccv = tmp;
8347 ret = tmp <op> value;
8348 cmpxchgsz.acq tmp = [ptr], ret
8349 } while (tmp != old)
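/* Contrast with ia64_expand_fetch_and_op above: here the value
   returned is the result of the operation rather than the old memory
   contents, so e.g. __sync_add_and_fetch_si (&x, 1) acts like an
   atomic ++x while __sync_fetch_and_add_si (&x, 1) acts like an
   atomic x++.  */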
8352 static rtx
8353 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
8354 tree arglist, rtx target)
8356 rtx old, label, tmp, ret, ccv, insn, mem, value;
8357 tree arg0, arg1;
8359 arg0 = TREE_VALUE (arglist);
8360 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8361 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8362 #ifdef POINTERS_EXTEND_UNSIGNED
8363 if (GET_MODE(mem) != Pmode)
8364 mem = convert_memory_address (Pmode, mem);
8365 #endif
8367 value = expand_expr (arg1, NULL_RTX, mode, 0);
8369 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8370 MEM_VOLATILE_P (mem) = 1;
8372 if (target && ! register_operand (target, mode))
8373 target = NULL_RTX;
8375 emit_insn (gen_mf ());
8376 tmp = gen_reg_rtx (mode);
8377 old = gen_reg_rtx (mode);
8378 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8379 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8381 emit_move_insn (tmp, mem);
8383 label = gen_label_rtx ();
8384 emit_label (label);
8385 emit_move_insn (old, tmp);
8386 convert_move (ccv, tmp, /*unsignedp=*/1);
8388 /* Perform the specific operation. Special case NAND by noticing
8389 one_cmpl_optab instead. */
8390 if (binoptab == one_cmpl_optab)
8392 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8393 binoptab = and_optab;
8395 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
8397 if (mode == SImode)
8398 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
8399 else
8400 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
8401 emit_insn (insn);
8403 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
8405 return ret;
8408 /* Expand val_ and bool_compare_and_swap. For val_ we want:
8410 ar.ccv = oldval
8412 cmpxchgsz.acq ret = [ptr], newval, ar.ccv
8413 return ret
8415 For bool_ it's the same except return ret == oldval.
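/* Typical use from C (illustrative):

        if (__sync_bool_compare_and_swap_si (&word, expected, desired))
          ... the swap happened ...

   while the val_ variant returns the value that was found in memory,
   so a caller can loop until it sees the value it expected.  */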
8418 static rtx
8419 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
8420 int boolp, tree arglist, rtx target)
8422 tree arg0, arg1, arg2;
8423 rtx mem, old, new, ccv, tmp, insn;
8425 arg0 = TREE_VALUE (arglist);
8426 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8427 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8428 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8429 old = expand_expr (arg1, NULL_RTX, mode, 0);
8430 new = expand_expr (arg2, NULL_RTX, mode, 0);
8432 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8433 MEM_VOLATILE_P (mem) = 1;
8435 if (GET_MODE (old) != mode)
8436 old = convert_to_mode (mode, old, /*unsignedp=*/1);
8437 if (GET_MODE (new) != mode)
8438 new = convert_to_mode (mode, new, /*unsignedp=*/1);
8440 if (! register_operand (old, mode))
8441 old = copy_to_mode_reg (mode, old);
8442 if (! register_operand (new, mode))
8443 new = copy_to_mode_reg (mode, new);
8445 if (! boolp && target && register_operand (target, mode))
8446 tmp = target;
8447 else
8448 tmp = gen_reg_rtx (mode);
8450 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8451 convert_move (ccv, old, /*unsignedp=*/1);
8452 emit_insn (gen_mf ());
8453 if (mode == SImode)
8454 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
8455 else
8456 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
8457 emit_insn (insn);
8459 if (boolp)
8461 if (! target)
8462 target = gen_reg_rtx (rmode);
8463 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
8465 else
8466 return tmp;
8469 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
8471 static rtx
8472 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
8473 rtx target)
8475 tree arg0, arg1;
8476 rtx mem, new, ret, insn;
8478 arg0 = TREE_VALUE (arglist);
8479 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8480 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8481 new = expand_expr (arg1, NULL_RTX, mode, 0);
8483 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8484 MEM_VOLATILE_P (mem) = 1;
8485 if (! register_operand (new, mode))
8486 new = copy_to_mode_reg (mode, new);
8488 if (target && register_operand (target, mode))
8489 ret = target;
8490 else
8491 ret = gen_reg_rtx (mode);
8493 if (mode == SImode)
8494 insn = gen_xchgsi (ret, mem, new);
8495 else
8496 insn = gen_xchgdi (ret, mem, new);
8497 emit_insn (insn);
8499 return ret;
8502 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
8504 static rtx
8505 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
8506 rtx target ATTRIBUTE_UNUSED)
8508 tree arg0;
8509 rtx mem;
8511 arg0 = TREE_VALUE (arglist);
8512 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8514 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8515 MEM_VOLATILE_P (mem) = 1;
8517 emit_move_insn (mem, const0_rtx);
8519 return const0_rtx;
8523 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8524 enum machine_mode mode ATTRIBUTE_UNUSED,
8525 int ignore ATTRIBUTE_UNUSED)
8527 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8528 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8529 tree arglist = TREE_OPERAND (exp, 1);
8530 enum machine_mode rmode = VOIDmode;
8532 switch (fcode)
8534 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8535 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8536 mode = SImode;
8537 rmode = SImode;
8538 break;
8540 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8541 case IA64_BUILTIN_LOCK_RELEASE_SI:
8542 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8543 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8544 case IA64_BUILTIN_FETCH_AND_OR_SI:
8545 case IA64_BUILTIN_FETCH_AND_AND_SI:
8546 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8547 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8548 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8549 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8550 case IA64_BUILTIN_OR_AND_FETCH_SI:
8551 case IA64_BUILTIN_AND_AND_FETCH_SI:
8552 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8553 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8554 mode = SImode;
8555 break;
8557 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8558 mode = DImode;
8559 rmode = SImode;
8560 break;
8562 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8563 mode = DImode;
8564 rmode = DImode;
8565 break;
8567 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8568 case IA64_BUILTIN_LOCK_RELEASE_DI:
8569 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8570 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8571 case IA64_BUILTIN_FETCH_AND_OR_DI:
8572 case IA64_BUILTIN_FETCH_AND_AND_DI:
8573 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8574 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8575 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8576 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8577 case IA64_BUILTIN_OR_AND_FETCH_DI:
8578 case IA64_BUILTIN_AND_AND_FETCH_DI:
8579 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8580 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8581 mode = DImode;
8582 break;
8584 default:
8585 break;
8588 switch (fcode)
8590 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8591 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8592 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8593 target);
8595 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8596 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8597 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8598 target);
8600 case IA64_BUILTIN_SYNCHRONIZE:
8601 emit_insn (gen_mf ());
8602 return const0_rtx;
8604 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8605 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8606 return ia64_expand_lock_test_and_set (mode, arglist, target);
8608 case IA64_BUILTIN_LOCK_RELEASE_SI:
8609 case IA64_BUILTIN_LOCK_RELEASE_DI:
8610 return ia64_expand_lock_release (mode, arglist, target);
8612 case IA64_BUILTIN_BSP:
8613 if (! target || ! register_operand (target, DImode))
8614 target = gen_reg_rtx (DImode);
8615 emit_insn (gen_bsp_value (target));
8616 #ifdef POINTERS_EXTEND_UNSIGNED
8617 target = convert_memory_address (ptr_mode, target);
8618 #endif
8619 return target;
8621 case IA64_BUILTIN_FLUSHRS:
8622 emit_insn (gen_flushrs ());
8623 return const0_rtx;
8625 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8626 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8627 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8629 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8630 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8631 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8633 case IA64_BUILTIN_FETCH_AND_OR_SI:
8634 case IA64_BUILTIN_FETCH_AND_OR_DI:
8635 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8637 case IA64_BUILTIN_FETCH_AND_AND_SI:
8638 case IA64_BUILTIN_FETCH_AND_AND_DI:
8639 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8641 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8642 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8643 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8645 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8646 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8647 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8649 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8650 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8651 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8653 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8654 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8655 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8657 case IA64_BUILTIN_OR_AND_FETCH_SI:
8658 case IA64_BUILTIN_OR_AND_FETCH_DI:
8659 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8661 case IA64_BUILTIN_AND_AND_FETCH_SI:
8662 case IA64_BUILTIN_AND_AND_FETCH_DI:
8663 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8665 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8666 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8667 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8669 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8670 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8671 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
8673 default:
8674 break;
8677 return NULL_RTX;
8680 /* On HP-UX IA64, aggregate parameters are passed in the most
8681 significant bits of the stack slot. */
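/* For instance, a 4-byte struct passed in an 8-byte slot occupies the
   slot's most significant 4 bytes (padding `upward'), which matches
   the big-endian HP-UX calling convention.  */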
8683 enum direction
8684 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8686 /* Exception to normal case for structures/unions/etc. */
8688 if (type && AGGREGATE_TYPE_P (type)
8689 && int_size_in_bytes (type) < UNITS_PER_WORD)
8690 return upward;
8692 /* Fall back to the default. */
8693 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
8696 /* Linked list of all external functions that are to be emitted by GCC.
8697 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8698 order to avoid putting out names that are never really used. */
8700 struct extern_func_list GTY(())
8702 struct extern_func_list *next;
8703 tree decl;
8706 static GTY(()) struct extern_func_list *extern_func_head;
8708 static void
8709 ia64_hpux_add_extern_decl (tree decl)
8711 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8713 p->decl = decl;
8714 p->next = extern_func_head;
8715 extern_func_head = p;
8718 /* Print out the list of used global functions. */
8720 static void
8721 ia64_hpux_file_end (void)
8723 struct extern_func_list *p;
8725 for (p = extern_func_head; p; p = p->next)
8727 tree decl = p->decl;
8728 tree id = DECL_ASSEMBLER_NAME (decl);
8730 if (!id)
8731 abort ();
8733 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8735 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8737 TREE_ASM_WRITTEN (decl) = 1;
8738 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8739 fputs (TYPE_ASM_OP, asm_out_file);
8740 assemble_name (asm_out_file, name);
8741 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8745 extern_func_head = 0;
8748 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
8749 modes of word_mode and larger. Rename the TFmode libfuncs using the
8750 HPUX conventions. __divtf3 is used for XFmode; we need to keep it for
8751 backward compatibility. */
8753 static void
8754 ia64_init_libfuncs (void)
8756 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8757 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8758 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8759 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8761 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8762 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8763 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8764 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8765 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8767 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8768 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8769 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8770 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8771 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8772 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8774 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8775 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8776 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8777 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8779 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8780 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8783 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8785 static void
8786 ia64_hpux_init_libfuncs (void)
8788 ia64_init_libfuncs ();
8790 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8791 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8792 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8794 /* ia64_expand_compare uses this. */
8795 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8797 /* These should never be used. */
8798 set_optab_libfunc (eq_optab, TFmode, 0);
8799 set_optab_libfunc (ne_optab, TFmode, 0);
8800 set_optab_libfunc (gt_optab, TFmode, 0);
8801 set_optab_libfunc (ge_optab, TFmode, 0);
8802 set_optab_libfunc (lt_optab, TFmode, 0);
8803 set_optab_libfunc (le_optab, TFmode, 0);
8806 /* Rename the division and modulus functions in VMS. */
8808 static void
8809 ia64_vms_init_libfuncs (void)
8811 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8812 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8813 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8814 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8815 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8816 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8817 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8818 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8821 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8822 the HPUX conventions. */
8824 static void
8825 ia64_sysv4_init_libfuncs (void)
8827 ia64_init_libfuncs ();
8829 /* These functions are not part of the HPUX TFmode interface. We
8830 use them instead of _U_Qfcmp, which doesn't work the way we
8831 expect. */
8832 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8833 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8834 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8835 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8836 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8837 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8839 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8840 glibc doesn't have them. */
8843 /* Switch to the section to which we should output X. The only thing
8844 special we do here is to honor small data. */
8846 static void
8847 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8848 unsigned HOST_WIDE_INT align)
8850 if (GET_MODE_SIZE (mode) > 0
8851 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8852 sdata_section ();
8853 else
8854 default_elf_select_rtx_section (mode, x, align);
8857 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8858 Pretend flag_pic is always set. */
8860 static void
8861 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8863 default_elf_select_section_1 (exp, reloc, align, true);
8866 static void
8867 ia64_rwreloc_unique_section (tree decl, int reloc)
8869 default_unique_section_1 (decl, reloc, true);
8872 static void
8873 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8874 unsigned HOST_WIDE_INT align)
8876 int save_pic = flag_pic;
8877 flag_pic = 1;
8878 ia64_select_rtx_section (mode, x, align);
8879 flag_pic = save_pic;
8882 static unsigned int
8883 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8885 return default_section_type_flags_1 (decl, name, reloc, true);
8888 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
8889 structure type whose address should be passed in out0, rather than
8890 in r8. */
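/* Illustrative C++ (not compiled here):

        struct S { S (const S &); ~S (); int i; };
        S f ();

   S has a non-trivial copy constructor, so under the Itanium C++ ABI
   the address of the return slot for f is passed in out0, and in a
   thunk for a virtual function with such a return type `this' becomes
   the second parameter; see ia64_output_mi_thunk below.  */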
8892 static bool
8893 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8895 tree ret_type = TREE_TYPE (fntype);
8897 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8898 as the structure return address parameter, if the return value
8899 type has a non-trivial copy constructor or destructor. It is not
8900 clear if this same convention should be used for other
8901 programming languages. Until G++ 3.4, we incorrectly used r8 for
8902 these return values. */
8903 return (abi_version_at_least (2)
8904 && ret_type
8905 && TYPE_MODE (ret_type) == BLKmode
8906 && TREE_ADDRESSABLE (ret_type)
8907 && strcmp (lang_hooks.name, "GNU C++") == 0);
8910 /* Output the assembler code for a thunk function. THUNK_DECL is the
8911 declaration for the thunk function itself, FUNCTION is the decl for
8912 the target function. DELTA is an immediate constant offset to be
8913 added to THIS. If VCALL_OFFSET is nonzero, the word at
8914 *(*this + vcall_offset) should be added to THIS. */
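/* Conceptually (an illustration, not the emitted code), a thunk with
   DELTA == -8 and no vcall offset behaves like

        this = this - 8;
        goto function;            // tail call, other arguments untouched

   and a nonzero VCALL_OFFSET adds an extra indirection through the
   vtable to adjust `this' first.  */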
8916 static void
8917 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8918 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8919 tree function)
8921 rtx this, insn, funexp;
8922 unsigned int this_parmno;
8923 unsigned int this_regno;
8925 reload_completed = 1;
8926 epilogue_completed = 1;
8927 no_new_pseudos = 1;
8929 /* Set things up as ia64_expand_prologue might. */
8930 last_scratch_gr_reg = 15;
8932 memset (&current_frame_info, 0, sizeof (current_frame_info));
8933 current_frame_info.spill_cfa_off = -16;
8934 current_frame_info.n_input_regs = 1;
8935 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8937 /* Mark the end of the (empty) prologue. */
8938 emit_note (NOTE_INSN_PROLOGUE_END);
8940 /* Figure out whether "this" will be the first parameter (the
8941 typical case) or the second parameter (as happens when the
8942 virtual function returns certain class objects). */
8943 this_parmno
8944 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
8945 ? 1 : 0);
8946 this_regno = IN_REG (this_parmno);
8947 if (!TARGET_REG_NAMES)
8948 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
8950 this = gen_rtx_REG (Pmode, this_regno);
8951 if (TARGET_ILP32)
8953 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
8954 REG_POINTER (tmp) = 1;
8955 if (delta && CONST_OK_FOR_I (delta))
8957 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8958 delta = 0;
8960 else
8961 emit_insn (gen_ptr_extend (this, tmp));
8964 /* Apply the constant offset, if required. */
8965 if (delta)
8967 rtx delta_rtx = GEN_INT (delta);
8969 if (!CONST_OK_FOR_I (delta))
8971 rtx tmp = gen_rtx_REG (Pmode, 2);
8972 emit_move_insn (tmp, delta_rtx);
8973 delta_rtx = tmp;
8975 emit_insn (gen_adddi3 (this, this, delta_rtx));
8978 /* Apply the offset from the vtable, if required. */
8979 if (vcall_offset)
8981 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8982 rtx tmp = gen_rtx_REG (Pmode, 2);
8984 if (TARGET_ILP32)
8986 rtx t = gen_rtx_REG (ptr_mode, 2);
8987 REG_POINTER (t) = 1;
8988 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8989 if (CONST_OK_FOR_I (vcall_offset))
8991 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8992 vcall_offset_rtx));
8993 vcall_offset = 0;
8995 else
8996 emit_insn (gen_ptr_extend (tmp, t));
8998 else
8999 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
9001 if (vcall_offset)
9003 if (!CONST_OK_FOR_J (vcall_offset))
9005 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9006 emit_move_insn (tmp2, vcall_offset_rtx);
9007 vcall_offset_rtx = tmp2;
9009 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9012 if (TARGET_ILP32)
9013 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
9014 gen_rtx_MEM (ptr_mode, tmp));
9015 else
9016 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9018 emit_insn (gen_adddi3 (this, this, tmp));
9021 /* Generate a tail call to the target function. */
9022 if (! TREE_USED (function))
9024 assemble_external (function);
9025 TREE_USED (function) = 1;
9027 funexp = XEXP (DECL_RTL (function), 0);
9028 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9029 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
9030 insn = get_last_insn ();
9031 SIBLING_CALL_P (insn) = 1;
9033 /* Code generation for calls relies on splitting. */
9034 reload_completed = 1;
9035 epilogue_completed = 1;
9036 try_split (PATTERN (insn), insn, 0);
9038 emit_barrier ();
9040 /* Run just enough of rest_of_compilation to get the insns emitted.
9041 There's not really enough bulk here to make other passes such as
9042 instruction scheduling worthwhile. Note that use_thunk calls
9043 assemble_start_function and assemble_end_function. */
9045 insn_locators_initialize ();
9046 emit_all_insn_group_barriers (NULL);
9047 insn = get_insns ();
9048 shorten_branches (insn);
9049 final_start_function (insn, file, 1);
9050 final (insn, file, 1, 0);
9051 final_end_function ();
9053 reload_completed = 0;
9054 epilogue_completed = 0;
9055 no_new_pseudos = 0;
9058 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9060 static rtx
9061 ia64_struct_value_rtx (tree fntype,
9062 int incoming ATTRIBUTE_UNUSED)
9064 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
9065 return NULL_RTX;
9066 return gen_rtx_REG (Pmode, GR_REG (8));
9069 #include "gt-ia64.h"