/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
   David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
54 /* This is used for communication between ASM_OUTPUT_LABEL and
55 ASM_OUTPUT_LABELREF. */
56 int ia64_asm_output_label = 0;
58 /* Define the information needed to generate branch and scc insns. This is
59 stored from the compare operation. */
60 struct rtx_def * ia64_compare_op0;
61 struct rtx_def * ia64_compare_op1;
63 /* Register names for ia64_expand_prologue. */
64 static const char * const ia64_reg_numbers[96] =
65 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
66 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
67 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
68 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
69 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
70 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
71 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
72 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
73 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
74 "r104","r105","r106","r107","r108","r109","r110","r111",
75 "r112","r113","r114","r115","r116","r117","r118","r119",
76 "r120","r121","r122","r123","r124","r125","r126","r127"};
78 /* ??? These strings could be shared with REGISTER_NAMES. */
79 static const char * const ia64_input_reg_names[8] =
80 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
82 /* ??? These strings could be shared with REGISTER_NAMES. */
83 static const char * const ia64_local_reg_names[80] =
84 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
85 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
86 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
87 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
88 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
89 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
90 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
91 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
92 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
93 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
95 /* ??? These strings could be shared with REGISTER_NAMES. */
96 static const char * const ia64_output_reg_names[8] =
97 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
99 /* String used with the -mfixed-range= option. */
100 const char *ia64_fixed_range_string;
102 /* Determines whether we use adds, addl, or movl to generate our
103 TLS immediate offsets. */
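/* adds takes a 14-bit immediate, addl a 22-bit immediate, and movl a full
   64-bit one, hence the value of 14, 22, or 64 kept here.  */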
104 int ia64_tls_size = 22;
106 /* String used with the -mtls-size= option. */
107 const char *ia64_tls_size_string;
109 /* Which cpu are we scheduling for. */
110 enum processor_type ia64_tune;
/* String used with the -mtune= option.  */
113 const char *ia64_tune_string;
115 /* Determines whether we run our final scheduling pass or not. We always
116 avoid the normal second scheduling pass. */
117 static int ia64_flag_schedule_insns2;
119 /* Variables which are this size or smaller are put in the sdata/sbss
120 sections. */
122 unsigned int ia64_section_threshold;
124 /* The following variable is used by the DFA insn scheduler. The value is
125 TRUE if we do insn bundling instead of insn scheduling. */
126 int bundling_p = 0;
128 /* Structure to be filled in by ia64_compute_frame_size with register
129 save masks and offsets for the current function. */
131 struct ia64_frame_info
133 HOST_WIDE_INT total_size; /* size of the stack frame, not including
134 the caller's scratch area. */
135 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
136 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
137 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
138 HARD_REG_SET mask; /* mask of saved registers. */
139 unsigned int gr_used_mask; /* mask of registers in use as gr spill
140 registers or long-term scratches. */
141 int n_spilled; /* number of spilled registers. */
142 int reg_fp; /* register for fp. */
143 int reg_save_b0; /* save register for b0. */
144 int reg_save_pr; /* save register for prs. */
145 int reg_save_ar_pfs; /* save register for ar.pfs. */
146 int reg_save_ar_unat; /* save register for ar.unat. */
147 int reg_save_ar_lc; /* save register for ar.lc. */
148 int reg_save_gp; /* save register for gp. */
149 int n_input_regs; /* number of input registers used. */
150 int n_local_regs; /* number of local registers used. */
151 int n_output_regs; /* number of output registers used. */
152 int n_rotate_regs; /* number of rotating registers used. */
154 char need_regstk; /* true if a .regstk directive needed. */
155 char initialized; /* true if the data is finalized. */
158 /* Current frame information calculated by ia64_compute_frame_size. */
159 static struct ia64_frame_info current_frame_info;
161 static int ia64_use_dfa_pipeline_interface (void);
162 static int ia64_first_cycle_multipass_dfa_lookahead (void);
163 static void ia64_dependencies_evaluation_hook (rtx, rtx);
164 static void ia64_init_dfa_pre_cycle_insn (void);
165 static rtx ia64_dfa_pre_cycle_insn (void);
166 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
167 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
168 static rtx gen_tls_get_addr (void);
169 static rtx gen_thread_pointer (void);
170 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
171 static int find_gr_spill (int);
172 static int next_scratch_gr_reg (void);
173 static void mark_reg_gr_used_mask (rtx, void *);
174 static void ia64_compute_frame_size (HOST_WIDE_INT);
175 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
176 static void finish_spill_pointers (void);
177 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
178 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
179 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
180 static rtx gen_movdi_x (rtx, rtx, rtx);
181 static rtx gen_fr_spill_x (rtx, rtx, rtx);
182 static rtx gen_fr_restore_x (rtx, rtx, rtx);
184 static enum machine_mode hfa_element_mode (tree, int);
185 static bool ia64_function_ok_for_sibcall (tree, tree);
186 static bool ia64_rtx_costs (rtx, int, int, int *);
187 static void fix_range (const char *);
188 static struct machine_function * ia64_init_machine_status (void);
189 static void emit_insn_group_barriers (FILE *);
190 static void emit_all_insn_group_barriers (FILE *);
191 static void final_emit_insn_group_barriers (FILE *);
192 static void emit_predicate_relation_info (void);
193 static void ia64_reorg (void);
194 static bool ia64_in_small_data_p (tree);
195 static void process_epilogue (void);
196 static int process_set (FILE *, rtx);
198 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
199 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
200 static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
201 int, tree, rtx);
202 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
203 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
204 static bool ia64_assemble_integer (rtx, unsigned int, int);
205 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
206 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
207 static void ia64_output_function_end_prologue (FILE *);
209 static int ia64_issue_rate (void);
210 static int ia64_adjust_cost (rtx, rtx, rtx, int);
211 static void ia64_sched_init (FILE *, int, int);
212 static void ia64_sched_finish (FILE *, int);
213 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
214 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
215 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
216 static int ia64_variable_issue (FILE *, int, rtx, int);
218 static struct bundle_state *get_free_bundle_state (void);
219 static void free_bundle_state (struct bundle_state *);
220 static void initiate_bundle_states (void);
221 static void finish_bundle_states (void);
222 static unsigned bundle_state_hash (const void *);
223 static int bundle_state_eq_p (const void *, const void *);
224 static int insert_bundle_state (struct bundle_state *);
225 static void initiate_bundle_state_table (void);
226 static void finish_bundle_state_table (void);
227 static int try_issue_nops (struct bundle_state *, int);
228 static int try_issue_insn (struct bundle_state *, rtx);
229 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
230 static int get_max_pos (state_t);
231 static int get_template (state_t, int);
233 static rtx get_next_important_insn (rtx, rtx);
234 static void bundling (FILE *, int, rtx, rtx);
236 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
237 HOST_WIDE_INT, tree);
238 static void ia64_file_start (void);
240 static void ia64_select_rtx_section (enum machine_mode, rtx,
241 unsigned HOST_WIDE_INT);
242 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
243 ATTRIBUTE_UNUSED;
244 static void ia64_rwreloc_unique_section (tree, int)
245 ATTRIBUTE_UNUSED;
246 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
247 unsigned HOST_WIDE_INT)
248 ATTRIBUTE_UNUSED;
249 static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
250 ATTRIBUTE_UNUSED;
252 static void ia64_hpux_add_extern_decl (const char *name)
253 ATTRIBUTE_UNUSED;
254 static void ia64_hpux_file_end (void)
255 ATTRIBUTE_UNUSED;
256 static void ia64_hpux_init_libfuncs (void)
257 ATTRIBUTE_UNUSED;
258 static void ia64_vms_init_libfuncs (void)
259 ATTRIBUTE_UNUSED;
261 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
262 static void ia64_encode_section_info (tree, rtx, int);
265 /* Table of valid machine attributes. */
266 static const struct attribute_spec ia64_attribute_table[] =
268 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
269 { "syscall_linkage", 0, 0, false, true, true, NULL },
270 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
271 { NULL, 0, 0, false, false, false, NULL }
274 /* Initialize the GCC target structure. */
275 #undef TARGET_ATTRIBUTE_TABLE
276 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
278 #undef TARGET_INIT_BUILTINS
279 #define TARGET_INIT_BUILTINS ia64_init_builtins
281 #undef TARGET_EXPAND_BUILTIN
282 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
284 #undef TARGET_ASM_BYTE_OP
285 #define TARGET_ASM_BYTE_OP "\tdata1\t"
286 #undef TARGET_ASM_ALIGNED_HI_OP
287 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
288 #undef TARGET_ASM_ALIGNED_SI_OP
289 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
290 #undef TARGET_ASM_ALIGNED_DI_OP
291 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
292 #undef TARGET_ASM_UNALIGNED_HI_OP
293 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
294 #undef TARGET_ASM_UNALIGNED_SI_OP
295 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
296 #undef TARGET_ASM_UNALIGNED_DI_OP
297 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
298 #undef TARGET_ASM_INTEGER
299 #define TARGET_ASM_INTEGER ia64_assemble_integer
301 #undef TARGET_ASM_FUNCTION_PROLOGUE
302 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
303 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
304 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
305 #undef TARGET_ASM_FUNCTION_EPILOGUE
306 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
308 #undef TARGET_IN_SMALL_DATA_P
309 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
311 #undef TARGET_SCHED_ADJUST_COST
312 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
313 #undef TARGET_SCHED_ISSUE_RATE
314 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
315 #undef TARGET_SCHED_VARIABLE_ISSUE
316 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
317 #undef TARGET_SCHED_INIT
318 #define TARGET_SCHED_INIT ia64_sched_init
319 #undef TARGET_SCHED_FINISH
320 #define TARGET_SCHED_FINISH ia64_sched_finish
321 #undef TARGET_SCHED_REORDER
322 #define TARGET_SCHED_REORDER ia64_sched_reorder
323 #undef TARGET_SCHED_REORDER2
324 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
326 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
327 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
329 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
330 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE ia64_use_dfa_pipeline_interface
332 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
333 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
335 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
336 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
337 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
338 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
340 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
341 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
342 ia64_first_cycle_multipass_dfa_lookahead_guard
344 #undef TARGET_SCHED_DFA_NEW_CYCLE
345 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
347 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
348 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
350 #undef TARGET_ASM_OUTPUT_MI_THUNK
351 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
352 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
353 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
355 #undef TARGET_ASM_FILE_START
356 #define TARGET_ASM_FILE_START ia64_file_start
358 #undef TARGET_RTX_COSTS
359 #define TARGET_RTX_COSTS ia64_rtx_costs
360 #undef TARGET_ADDRESS_COST
361 #define TARGET_ADDRESS_COST hook_int_rtx_0
363 #undef TARGET_MACHINE_DEPENDENT_REORG
364 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
366 #undef TARGET_ENCODE_SECTION_INFO
367 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
369 struct gcc_target targetm = TARGET_INITIALIZER;
371 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
374 call_operand (rtx op, enum machine_mode mode)
376 if (mode != GET_MODE (op) && mode != VOIDmode)
377 return 0;
379 return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG
380 || (GET_CODE (op) == SUBREG && GET_CODE (XEXP (op, 0)) == REG));
383 /* Return 1 if OP refers to a symbol in the sdata section. */
386 sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
388 switch (GET_CODE (op))
390 case CONST:
391 if (GET_CODE (XEXP (op, 0)) != PLUS
392 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
393 break;
394 op = XEXP (XEXP (op, 0), 0);
395 /* FALLTHRU */
397 case SYMBOL_REF:
398 if (CONSTANT_POOL_ADDRESS_P (op))
399 return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
400 else
401 return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);
403 default:
404 break;
407 return 0;
411 small_addr_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
413 return SYMBOL_REF_SMALL_ADDR_P (op);
416 /* Return 1 if OP refers to a symbol, and is appropriate for a GOT load. */
419 got_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
421 switch (GET_CODE (op))
423 case CONST:
424 op = XEXP (op, 0);
425 if (GET_CODE (op) != PLUS)
426 return 0;
427 if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
428 return 0;
429 op = XEXP (op, 1);
430 if (GET_CODE (op) != CONST_INT)
431 return 0;
433 return 1;
435 /* Ok if we're not using GOT entries at all. */
436 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
437 return 1;
439 /* "Ok" while emitting rtl, since otherwise we won't be provided
440 with the entire offset during emission, which makes it very
441 hard to split the offset into high and low parts. */
442 if (rtx_equal_function_value_matters)
443 return 1;
445 /* Force the low 14 bits of the constant to zero so that we do not
446 use up so many GOT entries. */
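/* (In effect, only offsets that are multiples of 0x4000 are folded into
   the GOT entry itself.)  */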
447 return (INTVAL (op) & 0x3fff) == 0;
449 case SYMBOL_REF:
450 if (SYMBOL_REF_SMALL_ADDR_P (op))
451 return 0;
452 case LABEL_REF:
453 return 1;
455 default:
456 break;
458 return 0;
461 /* Return 1 if OP refers to a symbol. */
464 symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
466 switch (GET_CODE (op))
468 case CONST:
469 case SYMBOL_REF:
470 case LABEL_REF:
471 return 1;
473 default:
474 break;
476 return 0;
479 /* Return tls_model if OP refers to a TLS symbol. */
482 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
484 if (GET_CODE (op) != SYMBOL_REF)
485 return 0;
486 return SYMBOL_REF_TLS_MODEL (op);
490 /* Return 1 if OP refers to a function. */
493 function_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
495 if (GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (op))
496 return 1;
497 else
498 return 0;
501 /* Return 1 if OP is setjmp or a similar function. */
503 /* ??? This is an unsatisfying solution. Should rethink. */
506 setjmp_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
508 const char *name;
509 int retval = 0;
511 if (GET_CODE (op) != SYMBOL_REF)
512 return 0;
514 name = XSTR (op, 0);
516 /* The following code is borrowed from special_function_p in calls.c. */
518 /* Disregard prefix _, __ or __x. */
519 if (name[0] == '_')
521 if (name[1] == '_' && name[2] == 'x')
522 name += 3;
523 else if (name[1] == '_')
524 name += 2;
525 else
526 name += 1;
529 if (name[0] == 's')
531 retval
532 = ((name[1] == 'e'
533 && (! strcmp (name, "setjmp")
534 || ! strcmp (name, "setjmp_syscall")))
535 || (name[1] == 'i'
536 && ! strcmp (name, "sigsetjmp"))
537 || (name[1] == 'a'
538 && ! strcmp (name, "savectx")));
540 else if ((name[0] == 'q' && name[1] == 's'
541 && ! strcmp (name, "qsetjmp"))
542 || (name[0] == 'v' && name[1] == 'f'
543 && ! strcmp (name, "vfork")))
544 retval = 1;
546 return retval;
549 /* Return 1 if OP is a general operand, excluding tls symbolic operands. */
552 move_operand (rtx op, enum machine_mode mode)
554 return general_operand (op, mode) && !tls_symbolic_operand (op, mode);
557 /* Return 1 if OP is a register operand that is (or could be) a GR reg. */
560 gr_register_operand (rtx op, enum machine_mode mode)
562 if (! register_operand (op, mode))
563 return 0;
564 if (GET_CODE (op) == SUBREG)
565 op = SUBREG_REG (op);
566 if (GET_CODE (op) == REG)
568 unsigned int regno = REGNO (op);
569 if (regno < FIRST_PSEUDO_REGISTER)
570 return GENERAL_REGNO_P (regno);
572 return 1;
575 /* Return 1 if OP is a register operand that is (or could be) an FR reg. */
578 fr_register_operand (rtx op, enum machine_mode mode)
580 if (! register_operand (op, mode))
581 return 0;
582 if (GET_CODE (op) == SUBREG)
583 op = SUBREG_REG (op);
584 if (GET_CODE (op) == REG)
586 unsigned int regno = REGNO (op);
587 if (regno < FIRST_PSEUDO_REGISTER)
588 return FR_REGNO_P (regno);
590 return 1;
593 /* Return 1 if OP is a register operand that is (or could be) a GR/FR reg. */
596 grfr_register_operand (rtx op, enum machine_mode mode)
598 if (! register_operand (op, mode))
599 return 0;
600 if (GET_CODE (op) == SUBREG)
601 op = SUBREG_REG (op);
602 if (GET_CODE (op) == REG)
604 unsigned int regno = REGNO (op);
605 if (regno < FIRST_PSEUDO_REGISTER)
606 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
608 return 1;
611 /* Return 1 if OP is a nonimmediate operand that is (or could be) a GR reg. */
614 gr_nonimmediate_operand (rtx op, enum machine_mode mode)
616 if (! nonimmediate_operand (op, mode))
617 return 0;
618 if (GET_CODE (op) == SUBREG)
619 op = SUBREG_REG (op);
620 if (GET_CODE (op) == REG)
622 unsigned int regno = REGNO (op);
623 if (regno < FIRST_PSEUDO_REGISTER)
624 return GENERAL_REGNO_P (regno);
626 return 1;
629 /* Return 1 if OP is a nonimmediate operand that is (or could be) a FR reg. */
632 fr_nonimmediate_operand (rtx op, enum machine_mode mode)
634 if (! nonimmediate_operand (op, mode))
635 return 0;
636 if (GET_CODE (op) == SUBREG)
637 op = SUBREG_REG (op);
638 if (GET_CODE (op) == REG)
640 unsigned int regno = REGNO (op);
641 if (regno < FIRST_PSEUDO_REGISTER)
642 return FR_REGNO_P (regno);
644 return 1;
647 /* Return 1 if OP is a nonimmediate operand that is a GR/FR reg. */
650 grfr_nonimmediate_operand (rtx op, enum machine_mode mode)
652 if (! nonimmediate_operand (op, mode))
653 return 0;
654 if (GET_CODE (op) == SUBREG)
655 op = SUBREG_REG (op);
656 if (GET_CODE (op) == REG)
658 unsigned int regno = REGNO (op);
659 if (regno < FIRST_PSEUDO_REGISTER)
660 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
662 return 1;
665 /* Return 1 if OP is a GR register operand, or zero. */
668 gr_reg_or_0_operand (rtx op, enum machine_mode mode)
670 return (op == const0_rtx || gr_register_operand (op, mode));
673 /* Return 1 if OP is a GR register operand, or a 5 bit immediate operand. */
676 gr_reg_or_5bit_operand (rtx op, enum machine_mode mode)
678 return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 32)
679 || GET_CODE (op) == CONSTANT_P_RTX
680 || gr_register_operand (op, mode));
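/* The CONST_OK_FOR_* tests used by the predicates below correspond to the
   ia64 constraint letters defined in ia64.h: roughly, K accepts an 8-bit
   signed immediate, L the 8-bit range after a comparison constant has been
   adjusted by one, I a 14-bit signed immediate, J a 22-bit signed
   immediate, and M a 6-bit unsigned shift count.  */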
683 /* Return 1 if OP is a GR register operand, or a 6 bit immediate operand. */
686 gr_reg_or_6bit_operand (rtx op, enum machine_mode mode)
688 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
689 || GET_CODE (op) == CONSTANT_P_RTX
690 || gr_register_operand (op, mode));
693 /* Return 1 if OP is a GR register operand, or an 8 bit immediate operand. */
696 gr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
698 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
699 || GET_CODE (op) == CONSTANT_P_RTX
700 || gr_register_operand (op, mode));
703 /* Return 1 if OP is a GR/FR register operand, or an 8 bit immediate. */
706 grfr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
708 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
709 || GET_CODE (op) == CONSTANT_P_RTX
710 || grfr_register_operand (op, mode));
713 /* Return 1 if OP is a register operand, or an 8 bit adjusted immediate
714 operand. */
717 gr_reg_or_8bit_adjusted_operand (rtx op, enum machine_mode mode)
719 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_L (INTVAL (op)))
720 || GET_CODE (op) == CONSTANT_P_RTX
721 || gr_register_operand (op, mode));
724 /* Return 1 if OP is a register operand, or is valid for both an 8 bit
725 immediate and an 8 bit adjusted immediate operand. This is necessary
726 because when we emit a compare, we don't know what the condition will be,
727 so we need the union of the immediates accepted by GT and LT. */
730 gr_reg_or_8bit_and_adjusted_operand (rtx op, enum machine_mode mode)
732 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))
733 && CONST_OK_FOR_L (INTVAL (op)))
734 || GET_CODE (op) == CONSTANT_P_RTX
735 || gr_register_operand (op, mode));
738 /* Return 1 if OP is a register operand, or a 14 bit immediate operand. */
741 gr_reg_or_14bit_operand (rtx op, enum machine_mode mode)
743 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_I (INTVAL (op)))
744 || GET_CODE (op) == CONSTANT_P_RTX
745 || gr_register_operand (op, mode));
748 /* Return 1 if OP is a register operand, or a 22 bit immediate operand. */
751 gr_reg_or_22bit_operand (rtx op, enum machine_mode mode)
753 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
754 || GET_CODE (op) == CONSTANT_P_RTX
755 || gr_register_operand (op, mode));
758 /* Return 1 if OP is a 6 bit immediate operand. */
761 shift_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
763 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
764 || GET_CODE (op) == CONSTANT_P_RTX);
767 /* Return 1 if OP is a 5 bit immediate operand. */
770 shift_32bit_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
772 return ((GET_CODE (op) == CONST_INT
773 && (INTVAL (op) >= 0 && INTVAL (op) < 32))
774 || GET_CODE (op) == CONSTANT_P_RTX);
777 /* Return 1 if OP is a 2, 4, 8, or 16 immediate operand. */
780 shladd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
782 return (GET_CODE (op) == CONST_INT
783 && (INTVAL (op) == 2 || INTVAL (op) == 4
784 || INTVAL (op) == 8 || INTVAL (op) == 16));
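/* These are the only multipliers the shladd instruction can encode; its
   count field holds a shift of 1 to 4 bits.  */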
787 /* Return 1 if OP is a -16, -8, -4, -1, 1, 4, 8, or 16 immediate operand. */
790 fetchadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
792 return (GET_CODE (op) == CONST_INT
793 && (INTVAL (op) == -16 || INTVAL (op) == -8 ||
794 INTVAL (op) == -4 || INTVAL (op) == -1 ||
795 INTVAL (op) == 1 || INTVAL (op) == 4 ||
796 INTVAL (op) == 8 || INTVAL (op) == 16));
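/* These are the only increments accepted by the fetchadd4 and fetchadd8
   instructions.  */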
799 /* Return 1 if OP is a floating-point constant zero, one, or a register. */
802 fr_reg_or_fp01_operand (rtx op, enum machine_mode mode)
804 return ((GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (op))
805 || fr_register_operand (op, mode));
808 /* Like nonimmediate_operand, but don't allow MEMs that try to use a
809 POST_MODIFY with a REG as displacement. */
812 destination_operand (rtx op, enum machine_mode mode)
814 if (! nonimmediate_operand (op, mode))
815 return 0;
816 if (GET_CODE (op) == MEM
817 && GET_CODE (XEXP (op, 0)) == POST_MODIFY
818 && GET_CODE (XEXP (XEXP (XEXP (op, 0), 1), 1)) == REG)
819 return 0;
820 return 1;
823 /* Like memory_operand, but don't allow post-increments. */
826 not_postinc_memory_operand (rtx op, enum machine_mode mode)
828 return (memory_operand (op, mode)
829 && GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != 'a');
832 /* Return 1 if this is a comparison operator, which accepts a normal 8-bit
833 signed immediate operand. */
836 normal_comparison_operator (register rtx op, enum machine_mode mode)
838 enum rtx_code code = GET_CODE (op);
839 return ((mode == VOIDmode || GET_MODE (op) == mode)
840 && (code == EQ || code == NE
841 || code == GT || code == LE || code == GTU || code == LEU));
844 /* Return 1 if this is a comparison operator, which accepts an adjusted 8-bit
845 signed immediate operand. */
848 adjusted_comparison_operator (register rtx op, enum machine_mode mode)
850 enum rtx_code code = GET_CODE (op);
851 return ((mode == VOIDmode || GET_MODE (op) == mode)
852 && (code == LT || code == GE || code == LTU || code == GEU));
855 /* Return 1 if this is a signed inequality operator. */
858 signed_inequality_operator (register rtx op, enum machine_mode mode)
860 enum rtx_code code = GET_CODE (op);
861 return ((mode == VOIDmode || GET_MODE (op) == mode)
862 && (code == GE || code == GT
863 || code == LE || code == LT));
866 /* Return 1 if this operator is valid for predication. */
869 predicate_operator (register rtx op, enum machine_mode mode)
871 enum rtx_code code = GET_CODE (op);
872 return ((GET_MODE (op) == mode || mode == VOIDmode)
873 && (code == EQ || code == NE));
876 /* Return 1 if this operator can be used in a conditional operation. */
879 condop_operator (register rtx op, enum machine_mode mode)
881 enum rtx_code code = GET_CODE (op);
882 return ((GET_MODE (op) == mode || mode == VOIDmode)
883 && (code == PLUS || code == MINUS || code == AND
884 || code == IOR || code == XOR));
887 /* Return 1 if this is the ar.lc register. */
890 ar_lc_reg_operand (register rtx op, enum machine_mode mode)
892 return (GET_MODE (op) == DImode
893 && (mode == DImode || mode == VOIDmode)
894 && GET_CODE (op) == REG
895 && REGNO (op) == AR_LC_REGNUM);
898 /* Return 1 if this is the ar.ccv register. */
901 ar_ccv_reg_operand (register rtx op, enum machine_mode mode)
903 return ((GET_MODE (op) == mode || mode == VOIDmode)
904 && GET_CODE (op) == REG
905 && REGNO (op) == AR_CCV_REGNUM);
908 /* Return 1 if this is the ar.pfs register. */
911 ar_pfs_reg_operand (register rtx op, enum machine_mode mode)
913 return ((GET_MODE (op) == mode || mode == VOIDmode)
914 && GET_CODE (op) == REG
915 && REGNO (op) == AR_PFS_REGNUM);
918 /* Like general_operand, but don't allow (mem (addressof)). */
921 general_xfmode_operand (rtx op, enum machine_mode mode)
923 if (! general_operand (op, mode))
924 return 0;
925 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
926 return 0;
927 return 1;
930 /* Similarly. */
933 destination_xfmode_operand (rtx op, enum machine_mode mode)
935 if (! destination_operand (op, mode))
936 return 0;
937 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
938 return 0;
939 return 1;
942 /* Similarly. */
945 xfreg_or_fp01_operand (rtx op, enum machine_mode mode)
947 if (GET_CODE (op) == SUBREG)
948 return 0;
949 return fr_reg_or_fp01_operand (op, mode);
952 /* Return 1 if OP is valid as a base register in a reg + offset address. */
955 basereg_operand (rtx op, enum machine_mode mode)
957 /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
958 checks from pa.c basereg_operand as well? Seems to be OK without them
959 in test runs. */
961 return (register_operand (op, mode) &&
962 REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
965 typedef enum
967 ADDR_AREA_NORMAL, /* normal address area */
968 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
970 ia64_addr_area;
972 static GTY(()) tree small_ident1;
973 static GTY(()) tree small_ident2;
975 static void
976 init_idents (void)
978 if (small_ident1 == 0)
980 small_ident1 = get_identifier ("small");
981 small_ident2 = get_identifier ("__small__");
985 /* Retrieve the address area that has been chosen for the given decl. */
987 static ia64_addr_area
988 ia64_get_addr_area (tree decl)
990 tree model_attr;
992 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
993 if (model_attr)
995 tree id;
997 init_idents ();
998 id = TREE_VALUE (TREE_VALUE (model_attr));
999 if (id == small_ident1 || id == small_ident2)
1000 return ADDR_AREA_SMALL;
1002 return ADDR_AREA_NORMAL;
1005 static tree
1006 ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1008 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
1009 ia64_addr_area area;
1010 tree arg, decl = *node;
1012 init_idents ();
1013 arg = TREE_VALUE (args);
1014 if (arg == small_ident1 || arg == small_ident2)
1016 addr_area = ADDR_AREA_SMALL;
1018 else
1020 warning ("invalid argument of `%s' attribute",
1021 IDENTIFIER_POINTER (name));
1022 *no_add_attrs = true;
1025 switch (TREE_CODE (decl))
1027 case VAR_DECL:
1028 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
1029 == FUNCTION_DECL)
1030 && !TREE_STATIC (decl))
1032 error ("%Jan address area attribute cannot be specified for "
1033 "local variables", decl, decl);
1034 *no_add_attrs = true;
1036 area = ia64_get_addr_area (decl);
1037 if (area != ADDR_AREA_NORMAL && addr_area != area)
1039 error ("%Jaddress area of '%s' conflicts with previous "
1040 "declaration", decl, decl);
1041 *no_add_attrs = true;
1043 break;
1045 case FUNCTION_DECL:
1046 error ("%Jaddress area attribute cannot be specified for functions",
1047 decl, decl);
1048 *no_add_attrs = true;
1049 break;
1051 default:
1052 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1053 *no_add_attrs = true;
1054 break;
1057 return NULL_TREE;
1060 static void
1061 ia64_encode_addr_area (tree decl, rtx symbol)
1063 int flags;
1065 flags = SYMBOL_REF_FLAGS (symbol);
1066 switch (ia64_get_addr_area (decl))
1068 case ADDR_AREA_NORMAL: break;
1069 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
1070 default: abort ();
1072 SYMBOL_REF_FLAGS (symbol) = flags;
1075 static void
1076 ia64_encode_section_info (tree decl, rtx rtl, int first)
1078 default_encode_section_info (decl, rtl, first);
1080 if (TREE_CODE (decl) == VAR_DECL
1081 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
1082 ia64_encode_addr_area (decl, XEXP (rtl, 0));
1085 /* Return 1 if the operands of a move are ok. */
1088 ia64_move_ok (rtx dst, rtx src)
1090 /* If we're under init_recog_no_volatile, we'll not be able to use
1091 memory_operand. So check the code directly and don't worry about
1092 the validity of the underlying address, which should have been
1093 checked elsewhere anyway. */
1094 if (GET_CODE (dst) != MEM)
1095 return 1;
1096 if (GET_CODE (src) == MEM)
1097 return 0;
1098 if (register_operand (src, VOIDmode))
1099 return 1;
1101 /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */
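/* (gr0 always reads as 0, and fr0/fr1 as 0.0/1.0, so these constants can
   be stored to memory without first being loaded into a scratch
   register.)  */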
1102 if (INTEGRAL_MODE_P (GET_MODE (dst)))
1103 return src == const0_rtx;
1104 else
1105 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
1109 addp4_optimize_ok (rtx op1, rtx op2)
1111 return (basereg_operand (op1, GET_MODE(op1)) !=
1112 basereg_operand (op2, GET_MODE(op2)));
1115 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
1116 Return the length of the field, or <= 0 on failure. */
1119 ia64_depz_field_mask (rtx rop, rtx rshift)
1121 unsigned HOST_WIDE_INT op = INTVAL (rop);
1122 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
1124 /* Get rid of the zero bits we're shifting in. */
1125 op >>= shift;
1127 /* We must now have a solid block of 1's at bit 0. */
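/* For example, a mask of 0x7f8 with a shift of 3 becomes 0xff here, and
   exact_log2 (0x100) == 8: an 8-bit field starting at bit 3.  */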
1128 return exact_log2 (op + 1);
1131 /* Expand a symbolic constant load. */
1133 void
1134 ia64_expand_load_address (rtx dest, rtx src)
1136 if (tls_symbolic_operand (src, VOIDmode))
1137 abort ();
1138 if (GET_CODE (dest) != REG)
1139 abort ();
1141 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1142 having to pointer-extend the value afterward. Other forms of address
1143 computation below are also more natural to compute as 64-bit quantities.
1144 If we've been given an SImode destination register, change it. */
1145 if (GET_MODE (dest) != Pmode)
1146 dest = gen_rtx_REG (Pmode, REGNO (dest));
1148 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
1150 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
1151 return;
1153 else if (TARGET_AUTO_PIC)
1155 emit_insn (gen_load_gprel64 (dest, src));
1156 return;
1158 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1160 emit_insn (gen_load_fptr (dest, src));
1161 return;
1163 else if (sdata_symbolic_operand (src, VOIDmode))
1165 emit_insn (gen_load_gprel (dest, src));
1166 return;
1169 if (GET_CODE (src) == CONST
1170 && GET_CODE (XEXP (src, 0)) == PLUS
1171 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
1172 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0)
1174 rtx sym = XEXP (XEXP (src, 0), 0);
1175 HOST_WIDE_INT ofs, hi, lo;
1177 /* Split the offset into a sign extended 14-bit low part
1178 and a complementary high part. */
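/* For example, an offset of 0x2345 splits into lo = -0x1cbb and
   hi = 0x4000; lo always lies in [-0x2000, 0x1fff], so the final add
   below fits in a 14-bit immediate.  */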
1179 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
1180 lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
1181 hi = ofs - lo;
1183 ia64_expand_load_address (dest, plus_constant (sym, hi));
1184 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
1186 else
1188 rtx tmp;
1190 tmp = gen_rtx_HIGH (Pmode, src);
1191 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1192 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1194 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
1195 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1199 static GTY(()) rtx gen_tls_tga;
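/* Return the __tls_get_addr libfunc symbol, creating it on first use.  */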
1200 static rtx
1201 gen_tls_get_addr (void)
1203 if (!gen_tls_tga)
1204 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1205 return gen_tls_tga;
1208 static GTY(()) rtx thread_pointer_rtx;
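/* Return the thread pointer, which the ia64 ABI keeps in r13.  */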
1209 static rtx
1210 gen_thread_pointer (void)
1212 if (!thread_pointer_rtx)
1214 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1215 RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
1217 return thread_pointer_rtx;
1220 static rtx
1221 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
1223 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1224 rtx orig_op0 = op0;
1226 switch (tls_kind)
1228 case TLS_MODEL_GLOBAL_DYNAMIC:
1229 start_sequence ();
1231 tga_op1 = gen_reg_rtx (Pmode);
1232 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1233 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1234 RTX_UNCHANGING_P (tga_op1) = 1;
1236 tga_op2 = gen_reg_rtx (Pmode);
1237 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
1238 tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
1239 RTX_UNCHANGING_P (tga_op2) = 1;
1241 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1242 LCT_CONST, Pmode, 2, tga_op1,
1243 Pmode, tga_op2, Pmode);
1245 insns = get_insns ();
1246 end_sequence ();
1248 if (GET_MODE (op0) != Pmode)
1249 op0 = tga_ret;
1250 emit_libcall_block (insns, op0, tga_ret, op1);
1251 break;
1253 case TLS_MODEL_LOCAL_DYNAMIC:
/* ??? This isn't the completely proper way to do local-dynamic.
1255 If the call to __tls_get_addr is used only by a single symbol,
1256 then we should (somehow) move the dtprel to the second arg
1257 to avoid the extra add. */
1258 start_sequence ();
1260 tga_op1 = gen_reg_rtx (Pmode);
1261 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1262 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1263 RTX_UNCHANGING_P (tga_op1) = 1;
1265 tga_op2 = const0_rtx;
1267 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1268 LCT_CONST, Pmode, 2, tga_op1,
1269 Pmode, tga_op2, Pmode);
1271 insns = get_insns ();
1272 end_sequence ();
1274 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1275 UNSPEC_LD_BASE);
1276 tmp = gen_reg_rtx (Pmode);
1277 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1279 if (!register_operand (op0, Pmode))
1280 op0 = gen_reg_rtx (Pmode);
1281 if (TARGET_TLS64)
1283 emit_insn (gen_load_dtprel (op0, op1));
1284 emit_insn (gen_adddi3 (op0, tmp, op0));
1286 else
1287 emit_insn (gen_add_dtprel (op0, tmp, op1));
1288 break;
1290 case TLS_MODEL_INITIAL_EXEC:
1291 tmp = gen_reg_rtx (Pmode);
1292 emit_insn (gen_load_ltoff_tprel (tmp, op1));
1293 tmp = gen_rtx_MEM (Pmode, tmp);
1294 RTX_UNCHANGING_P (tmp) = 1;
1295 tmp = force_reg (Pmode, tmp);
1297 if (!register_operand (op0, Pmode))
1298 op0 = gen_reg_rtx (Pmode);
1299 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1300 break;
1302 case TLS_MODEL_LOCAL_EXEC:
1303 if (!register_operand (op0, Pmode))
1304 op0 = gen_reg_rtx (Pmode);
1305 if (TARGET_TLS64)
1307 emit_insn (gen_load_tprel (op0, op1));
1308 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
1310 else
1311 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
1312 break;
1314 default:
1315 abort ();
1318 if (orig_op0 == op0)
1319 return NULL_RTX;
1320 if (GET_MODE (orig_op0) == Pmode)
1321 return op0;
1322 return gen_lowpart (GET_MODE (orig_op0), op0);
1326 ia64_expand_move (rtx op0, rtx op1)
1328 enum machine_mode mode = GET_MODE (op0);
1330 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1331 op1 = force_reg (mode, op1);
1333 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1335 enum tls_model tls_kind;
1336 if ((tls_kind = tls_symbolic_operand (op1, VOIDmode)))
1337 return ia64_expand_tls_address (tls_kind, op0, op1);
1339 if (!TARGET_NO_PIC && reload_completed)
1341 ia64_expand_load_address (op0, op1);
1342 return NULL_RTX;
1346 return op1;
1349 /* Split a move from OP1 to OP0 conditional on COND. */
1351 void
1352 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1354 rtx insn, first = get_last_insn ();
1356 emit_move_insn (op0, op1);
1358 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1359 if (INSN_P (insn))
1360 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1361 PATTERN (insn));
1364 /* Split a post-reload TImode reference into two DImode components. */
1367 ia64_split_timode (rtx out[2], rtx in, rtx scratch)
1369 switch (GET_CODE (in))
1371 case REG:
1372 out[0] = gen_rtx_REG (DImode, REGNO (in));
1373 out[1] = gen_rtx_REG (DImode, REGNO (in) + 1);
1374 return NULL_RTX;
1376 case MEM:
1378 rtx base = XEXP (in, 0);
1380 switch (GET_CODE (base))
1382 case REG:
1383 out[0] = adjust_address (in, DImode, 0);
1384 break;
1385 case POST_MODIFY:
1386 base = XEXP (base, 0);
1387 out[0] = adjust_address (in, DImode, 0);
1388 break;
1390 /* Since we're changing the mode, we need to change to POST_MODIFY
1391 as well to preserve the size of the increment. Either that or
1392 do the update in two steps, but we've already got this scratch
1393 register handy so let's use it. */
1394 case POST_INC:
1395 base = XEXP (base, 0);
1396 out[0]
1397 = change_address (in, DImode,
1398 gen_rtx_POST_MODIFY
1399 (Pmode, base, plus_constant (base, 16)));
1400 break;
1401 case POST_DEC:
1402 base = XEXP (base, 0);
1403 out[0]
1404 = change_address (in, DImode,
1405 gen_rtx_POST_MODIFY
1406 (Pmode, base, plus_constant (base, -16)));
1407 break;
1408 default:
1409 abort ();
1412 if (scratch == NULL_RTX)
1413 abort ();
1414 out[1] = change_address (in, DImode, scratch);
1415 return gen_adddi3 (scratch, base, GEN_INT (8));
1418 case CONST_INT:
1419 case CONST_DOUBLE:
1420 split_double (in, &out[0], &out[1]);
1421 return NULL_RTX;
1423 default:
1424 abort ();
1428 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1429 through memory plus an extra GR scratch register. Except that you can
1430 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1431 SECONDARY_RELOAD_CLASS, but not both.
1433 We got into problems in the first place by allowing a construct like
1434 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1435 This solution attempts to prevent this situation from occurring. When
1436 we see something like the above, we spill the inner register to memory. */
1439 spill_xfmode_operand (rtx in, int force)
1441 if (GET_CODE (in) == SUBREG
1442 && GET_MODE (SUBREG_REG (in)) == TImode
1443 && GET_CODE (SUBREG_REG (in)) == REG)
1445 rtx mem = gen_mem_addressof (SUBREG_REG (in), NULL_TREE, /*rescan=*/true);
1446 return gen_rtx_MEM (XFmode, copy_to_reg (XEXP (mem, 0)));
1448 else if (force && GET_CODE (in) == REG)
1450 rtx mem = gen_mem_addressof (in, NULL_TREE, /*rescan=*/true);
1451 return gen_rtx_MEM (XFmode, copy_to_reg (XEXP (mem, 0)));
1453 else if (GET_CODE (in) == MEM
1454 && GET_CODE (XEXP (in, 0)) == ADDRESSOF)
1455 return change_address (in, XFmode, copy_to_reg (XEXP (in, 0)));
1456 else
1457 return in;
1460 /* Emit comparison instruction if necessary, returning the expression
1461 that holds the compare result in the proper mode. */
1463 static GTY(()) rtx cmptf_libfunc;
1466 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1468 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1469 rtx cmp;
1471 /* If we have a BImode input, then we already have a compare result, and
1472 do not need to emit another comparison. */
1473 if (GET_MODE (op0) == BImode)
1475 if ((code == NE || code == EQ) && op1 == const0_rtx)
1476 cmp = op0;
1477 else
1478 abort ();
1480 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1481 magic number as its third argument, that indicates what to do.
1482 The return value is an integer to be compared against zero. */
1483 else if (TARGET_HPUX && GET_MODE (op0) == TFmode)
1485 enum qfcmp_magic {
1486 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1487 QCMP_UNORD = 2,
1488 QCMP_EQ = 4,
1489 QCMP_LT = 8,
1490 QCMP_GT = 16
1491 } magic;
1492 enum rtx_code ncode;
1493 rtx ret, insns;
1494 if (GET_MODE (op1) != TFmode)
1495 abort ();
1496 switch (code)
1498 /* 1 = equal, 0 = not equal. Equality operators do
1499 not raise FP_INVALID when given an SNaN operand. */
1500 case EQ: magic = QCMP_EQ; ncode = NE; break;
1501 case NE: magic = QCMP_EQ; ncode = EQ; break;
1502 /* isunordered() from C99. */
1503 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1504 /* Relational operators raise FP_INVALID when given
1505 an SNaN operand. */
1506 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1507 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1508 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1509 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1510 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
Expanders for buneq etc. would have to be added to ia64.md
1512 for this to be useful. */
1513 default: abort ();
1516 start_sequence ();
1518 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1519 op0, TFmode, op1, TFmode,
1520 GEN_INT (magic), DImode);
1521 cmp = gen_reg_rtx (BImode);
1522 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1523 gen_rtx_fmt_ee (ncode, BImode,
1524 ret, const0_rtx)));
1526 insns = get_insns ();
1527 end_sequence ();
1529 emit_libcall_block (insns, cmp, cmp,
1530 gen_rtx_fmt_ee (code, BImode, op0, op1));
1531 code = NE;
1533 else
1535 cmp = gen_reg_rtx (BImode);
1536 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1537 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1538 code = NE;
1541 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1544 /* Emit the appropriate sequence for a call. */
1546 void
1547 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1548 int sibcall_p)
1550 rtx insn, b0;
1552 addr = XEXP (addr, 0);
1553 addr = convert_memory_address (DImode, addr);
1554 b0 = gen_rtx_REG (DImode, R_BR (0));
1556 /* ??? Should do this for functions known to bind local too. */
1557 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1559 if (sibcall_p)
1560 insn = gen_sibcall_nogp (addr);
1561 else if (! retval)
1562 insn = gen_call_nogp (addr, b0);
1563 else
1564 insn = gen_call_value_nogp (retval, addr, b0);
1565 insn = emit_call_insn (insn);
1567 else
1569 if (sibcall_p)
1570 insn = gen_sibcall_gp (addr);
1571 else if (! retval)
1572 insn = gen_call_gp (addr, b0);
1573 else
1574 insn = gen_call_value_gp (retval, addr, b0);
1575 insn = emit_call_insn (insn);
1577 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1580 if (sibcall_p)
1581 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1584 void
1585 ia64_reload_gp (void)
1587 rtx tmp;
1589 if (current_frame_info.reg_save_gp)
1590 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1591 else
1593 HOST_WIDE_INT offset;
1595 offset = (current_frame_info.spill_cfa_off
1596 + current_frame_info.spill_size);
1597 if (frame_pointer_needed)
1599 tmp = hard_frame_pointer_rtx;
1600 offset = -offset;
1602 else
1604 tmp = stack_pointer_rtx;
1605 offset = current_frame_info.total_size - offset;
1608 if (CONST_OK_FOR_I (offset))
1609 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1610 tmp, GEN_INT (offset)));
1611 else
1613 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1614 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1615 pic_offset_table_rtx, tmp));
1618 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1621 emit_move_insn (pic_offset_table_rtx, tmp);
1624 void
1625 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1626 rtx scratch_b, int noreturn_p, int sibcall_p)
1628 rtx insn;
1629 bool is_desc = false;
1631 /* If we find we're calling through a register, then we're actually
1632 calling through a descriptor, so load up the values. */
1633 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1635 rtx tmp;
1636 bool addr_dead_p;
1638 /* ??? We are currently constrained to *not* use peep2, because
1639 we can legitimately change the global lifetime of the GP
1640 (in the form of killing where previously live). This is
1641 because a call through a descriptor doesn't use the previous
1642 value of the GP, while a direct call does, and we do not
1643 commit to either form until the split here.
1645 That said, this means that we lack precise life info for
1646 whether ADDR is dead after this call. This is not terribly
1647 important, since we can fix things up essentially for free
1648 with the POST_DEC below, but it's nice to not use it when we
1649 can immediately tell it's not necessary. */
1650 addr_dead_p = ((noreturn_p || sibcall_p
1651 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1652 REGNO (addr)))
1653 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1655 /* Load the code address into scratch_b. */
1656 tmp = gen_rtx_POST_INC (Pmode, addr);
1657 tmp = gen_rtx_MEM (Pmode, tmp);
1658 emit_move_insn (scratch_r, tmp);
1659 emit_move_insn (scratch_b, scratch_r);
1661 /* Load the GP address. If ADDR is not dead here, then we must
1662 revert the change made above via the POST_INCREMENT. */
1663 if (!addr_dead_p)
1664 tmp = gen_rtx_POST_DEC (Pmode, addr);
1665 else
1666 tmp = addr;
1667 tmp = gen_rtx_MEM (Pmode, tmp);
1668 emit_move_insn (pic_offset_table_rtx, tmp);
1670 is_desc = true;
1671 addr = scratch_b;
1674 if (sibcall_p)
1675 insn = gen_sibcall_nogp (addr);
1676 else if (retval)
1677 insn = gen_call_value_nogp (retval, addr, retaddr);
1678 else
1679 insn = gen_call_nogp (addr, retaddr);
1680 emit_call_insn (insn);
1682 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1683 ia64_reload_gp ();
1686 /* Begin the assembly file. */
1688 static void
1689 ia64_file_start (void)
1691 default_file_start ();
1692 emit_safe_across_calls ();
1695 void
1696 emit_safe_across_calls (void)
1698 unsigned int rs, re;
1699 int out_state;
1701 rs = 1;
1702 out_state = 0;
1703 while (1)
1705 while (rs < 64 && call_used_regs[PR_REG (rs)])
1706 rs++;
1707 if (rs >= 64)
1708 break;
1709 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1710 continue;
1711 if (out_state == 0)
1713 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1714 out_state = 1;
1716 else
1717 fputc (',', asm_out_file);
1718 if (re == rs + 1)
1719 fprintf (asm_out_file, "p%u", rs);
1720 else
1721 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1722 rs = re + 1;
1724 if (out_state)
1725 fputc ('\n', asm_out_file);
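/* With the usual ia64 call-used predicate set, the directive emitted above
   comes out as ".pred.safe_across_calls p1-p5,p16-p63".  */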
1728 /* Helper function for ia64_compute_frame_size: find an appropriate general
1729 register to spill some special register to. SPECIAL_SPILL_MASK contains
1730 bits in GR0 to GR31 that have already been allocated by this routine.
1731 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1733 static int
1734 find_gr_spill (int try_locals)
1736 int regno;
1738 /* If this is a leaf function, first try an otherwise unused
1739 call-clobbered register. */
1740 if (current_function_is_leaf)
1742 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1743 if (! regs_ever_live[regno]
1744 && call_used_regs[regno]
1745 && ! fixed_regs[regno]
1746 && ! global_regs[regno]
1747 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1749 current_frame_info.gr_used_mask |= 1 << regno;
1750 return regno;
1754 if (try_locals)
1756 regno = current_frame_info.n_local_regs;
1757 /* If there is a frame pointer, then we can't use loc79, because
1758 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1759 reg_name switching code in ia64_expand_prologue. */
1760 if (regno < (80 - frame_pointer_needed))
1762 current_frame_info.n_local_regs = regno + 1;
1763 return LOC_REG (0) + regno;
1767 /* Failed to find a general register to spill to. Must use stack. */
1768 return 0;
1771 /* In order to make for nice schedules, we try to allocate every temporary
1772 to a different register. We must of course stay away from call-saved,
1773 fixed, and global registers. We must also stay away from registers
1774 allocated in current_frame_info.gr_used_mask, since those include regs
1775 used all through the prologue.
1777 Any register allocated here must be used immediately. The idea is to
1778 aid scheduling, not to solve data flow problems. */
1780 static int last_scratch_gr_reg;
1782 static int
1783 next_scratch_gr_reg (void)
1785 int i, regno;
1787 for (i = 0; i < 32; ++i)
1789 regno = (last_scratch_gr_reg + i + 1) & 31;
1790 if (call_used_regs[regno]
1791 && ! fixed_regs[regno]
1792 && ! global_regs[regno]
1793 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1795 last_scratch_gr_reg = regno;
1796 return regno;
1800 /* There must be _something_ available. */
1801 abort ();
1804 /* Helper function for ia64_compute_frame_size, called through
1805 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
1807 static void
1808 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
1810 unsigned int regno = REGNO (reg);
1811 if (regno < 32)
1813 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1814 for (i = 0; i < n; ++i)
1815 current_frame_info.gr_used_mask |= 1 << (regno + i);
1819 /* Returns the number of bytes offset between the frame pointer and the stack
1820 pointer for the current function. SIZE is the number of bytes of space
1821 needed for local variables. */
1823 static void
1824 ia64_compute_frame_size (HOST_WIDE_INT size)
1826 HOST_WIDE_INT total_size;
1827 HOST_WIDE_INT spill_size = 0;
1828 HOST_WIDE_INT extra_spill_size = 0;
1829 HOST_WIDE_INT pretend_args_size;
1830 HARD_REG_SET mask;
1831 int n_spilled = 0;
1832 int spilled_gr_p = 0;
1833 int spilled_fr_p = 0;
1834 unsigned int regno;
1835 int i;
1837 if (current_frame_info.initialized)
1838 return;
1840 memset (&current_frame_info, 0, sizeof current_frame_info);
1841 CLEAR_HARD_REG_SET (mask);
1843 /* Don't allocate scratches to the return register. */
1844 diddle_return_value (mark_reg_gr_used_mask, NULL);
1846 /* Don't allocate scratches to the EH scratch registers. */
1847 if (cfun->machine->ia64_eh_epilogue_sp)
1848 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
1849 if (cfun->machine->ia64_eh_epilogue_bsp)
1850 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
1852 /* Find the size of the register stack frame. We have only 80 local
1853 registers, because we reserve 8 for the inputs and 8 for the
1854 outputs. */
1856 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
1857 since we'll be adjusting that down later. */
1858 regno = LOC_REG (78) + ! frame_pointer_needed;
1859 for (; regno >= LOC_REG (0); regno--)
1860 if (regs_ever_live[regno])
1861 break;
1862 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
1864 /* For functions marked with the syscall_linkage attribute, we must mark
1865 all eight input registers as in use, so that locals aren't visible to
1866 the caller. */
1868 if (cfun->machine->n_varargs > 0
1869 || lookup_attribute ("syscall_linkage",
1870 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
1871 current_frame_info.n_input_regs = 8;
1872 else
1874 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
1875 if (regs_ever_live[regno])
1876 break;
1877 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
1880 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
1881 if (regs_ever_live[regno])
1882 break;
1883 i = regno - OUT_REG (0) + 1;
1885 /* When -p profiling, we need one output register for the mcount argument.
1886 Likewise for -a profiling for the bb_init_func argument. For -ax
1887 profiling, we need two output registers for the two bb_init_trace_func
1888 arguments. */
1889 if (current_function_profile)
1890 i = MAX (i, 1);
1891 current_frame_info.n_output_regs = i;
1893 /* ??? No rotating register support yet. */
1894 current_frame_info.n_rotate_regs = 0;
1896 /* Discover which registers need spilling, and how much room that
1897 will take. Begin with floating point and general registers,
1898 which will always wind up on the stack. */
1900 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
1901 if (regs_ever_live[regno] && ! call_used_regs[regno])
1903 SET_HARD_REG_BIT (mask, regno);
1904 spill_size += 16;
1905 n_spilled += 1;
1906 spilled_fr_p = 1;
1909 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1910 if (regs_ever_live[regno] && ! call_used_regs[regno])
1912 SET_HARD_REG_BIT (mask, regno);
1913 spill_size += 8;
1914 n_spilled += 1;
1915 spilled_gr_p = 1;
1918 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
1919 if (regs_ever_live[regno] && ! call_used_regs[regno])
1921 SET_HARD_REG_BIT (mask, regno);
1922 spill_size += 8;
1923 n_spilled += 1;
1926 /* Now come all special registers that might get saved in other
1927 general registers. */
1929 if (frame_pointer_needed)
1931 current_frame_info.reg_fp = find_gr_spill (1);
1932 /* If we did not get a register, then we take LOC79. This is guaranteed
1933 to be free, even if regs_ever_live is already set, because this is
1934 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
1935 as we don't count loc79 above. */
1936 if (current_frame_info.reg_fp == 0)
1938 current_frame_info.reg_fp = LOC_REG (79);
1939 current_frame_info.n_local_regs++;
1943 if (! current_function_is_leaf)
1945 /* Emit a save of BR0 if we call other functions. Do this even
1946 if this function doesn't return, as EH depends on this to be
1947 able to unwind the stack. */
1948 SET_HARD_REG_BIT (mask, BR_REG (0));
1950 current_frame_info.reg_save_b0 = find_gr_spill (1);
1951 if (current_frame_info.reg_save_b0 == 0)
1953 spill_size += 8;
1954 n_spilled += 1;
1957 /* Similarly for ar.pfs. */
1958 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1959 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1960 if (current_frame_info.reg_save_ar_pfs == 0)
1962 extra_spill_size += 8;
1963 n_spilled += 1;
1966 /* Similarly for gp. Note that if we're calling setjmp, the stacked
1967 registers are clobbered, so we fall back to the stack. */
1968 current_frame_info.reg_save_gp
1969 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
1970 if (current_frame_info.reg_save_gp == 0)
1972 SET_HARD_REG_BIT (mask, GR_REG (1));
1973 spill_size += 8;
1974 n_spilled += 1;
1977 else
1979 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
1981 SET_HARD_REG_BIT (mask, BR_REG (0));
1982 spill_size += 8;
1983 n_spilled += 1;
1986 if (regs_ever_live[AR_PFS_REGNUM])
1988 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1989 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1990 if (current_frame_info.reg_save_ar_pfs == 0)
1992 extra_spill_size += 8;
1993 n_spilled += 1;
1998 /* Unwind descriptor hackery: things are most efficient if we allocate
1999 consecutive GR save registers for RP, PFS, FP in that order. However,
2000 it is absolutely critical that FP get the only hard register that's
2001 guaranteed to be free, so we allocated it first. If all three did
2002 happen to be allocated hard regs, and are consecutive, rearrange them
2003 into the preferred order now. */
2004 if (current_frame_info.reg_fp != 0
2005 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2006 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2008 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2009 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2010 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2013 /* See if we need to store the predicate register block. */
2014 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2015 if (regs_ever_live[regno] && ! call_used_regs[regno])
2016 break;
2017 if (regno <= PR_REG (63))
2019 SET_HARD_REG_BIT (mask, PR_REG (0));
2020 current_frame_info.reg_save_pr = find_gr_spill (1);
2021 if (current_frame_info.reg_save_pr == 0)
2023 extra_spill_size += 8;
2024 n_spilled += 1;
2027 /* ??? Mark them all as used so that register renaming and such
2028 are free to use them. */
2029 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2030 regs_ever_live[regno] = 1;
2033 /* If we're forced to use st8.spill, we're forced to save and restore
2034 ar.unat as well. The check for existing liveness allows inline asm
2035 to touch ar.unat. */
2036 if (spilled_gr_p || cfun->machine->n_varargs
2037 || regs_ever_live[AR_UNAT_REGNUM])
2039 regs_ever_live[AR_UNAT_REGNUM] = 1;
2040 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2041 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2042 if (current_frame_info.reg_save_ar_unat == 0)
2044 extra_spill_size += 8;
2045 n_spilled += 1;
2049 if (regs_ever_live[AR_LC_REGNUM])
2051 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2052 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2053 if (current_frame_info.reg_save_ar_lc == 0)
2055 extra_spill_size += 8;
2056 n_spilled += 1;
2060 /* If we have an odd number of words of pretend arguments written to
2061 the stack, then the FR save area will be unaligned. We round the
2062 size of this area up to keep things 16 byte aligned. */
2063 if (spilled_fr_p)
2064 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2065 else
2066 pretend_args_size = current_function_pretend_args_size;
2068 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2069 + current_function_outgoing_args_size);
2070 total_size = IA64_STACK_ALIGN (total_size);
2072 /* We always use the 16-byte scratch area provided by the caller, but
2073 if we are a leaf function, there's no one to which we need to provide
2074 a scratch area. */
2075 if (current_function_is_leaf)
2076 total_size = MAX (0, total_size - 16);
2078 current_frame_info.total_size = total_size;
2079 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2080 current_frame_info.spill_size = spill_size;
2081 current_frame_info.extra_spill_size = extra_spill_size;
2082 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2083 current_frame_info.n_spilled = n_spilled;
2084 current_frame_info.initialized = reload_completed;
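/* Illustrative example with hypothetical numbers: a non-leaf function with
   24 bytes of locals (SIZE), two spilled FR registers (spill_size = 32),
   one extra 8-byte spill slot (extra_spill_size = 8), no pretend arguments,
   and 16 bytes of outgoing argument space gets

     total_size = IA64_STACK_ALIGN (32 + 8 + 24 + 0 + 16) = 80;

   because the function is not a leaf, the caller's 16-byte scratch area is
   not subtracted.  */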
2087 /* Compute the initial difference between the specified pair of registers. */
2089 HOST_WIDE_INT
2090 ia64_initial_elimination_offset (int from, int to)
2092 HOST_WIDE_INT offset;
2094 ia64_compute_frame_size (get_frame_size ());
2095 switch (from)
2097 case FRAME_POINTER_REGNUM:
2098 if (to == HARD_FRAME_POINTER_REGNUM)
2100 if (current_function_is_leaf)
2101 offset = -current_frame_info.total_size;
2102 else
2103 offset = -(current_frame_info.total_size
2104 - current_function_outgoing_args_size - 16);
2106 else if (to == STACK_POINTER_REGNUM)
2108 if (current_function_is_leaf)
2109 offset = 0;
2110 else
2111 offset = 16 + current_function_outgoing_args_size;
2113 else
2114 abort ();
2115 break;
2117 case ARG_POINTER_REGNUM:
2118 /* Arguments start above the 16 byte save area, unless stdarg,
2119 in which case we store through the 16 byte save area. */
2120 if (to == HARD_FRAME_POINTER_REGNUM)
2121 offset = 16 - current_function_pretend_args_size;
2122 else if (to == STACK_POINTER_REGNUM)
2123 offset = (current_frame_info.total_size
2124 + 16 - current_function_pretend_args_size);
2125 else
2126 abort ();
2127 break;
2129 default:
2130 abort ();
2133 return offset;
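/* Illustrative example, using the same hypothetical numbers as above
   (total_size = 80, outgoing args = 16, pretend args = 0, non-leaf):

     FRAME_POINTER -> HARD_FRAME_POINTER:  -(80 - 16 - 16) = -48
     FRAME_POINTER -> STACK_POINTER:         16 + 16       =  32
     ARG_POINTER   -> HARD_FRAME_POINTER:    16 - 0        =  16
     ARG_POINTER   -> STACK_POINTER:         80 + 16 - 0   =  96  */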
2136 /* If there are more than a trivial number of register spills, we use
2137 two interleaved iterators so that we can get two memory references
2138 per insn group.
2140 In order to simplify things in the prologue and epilogue expanders,
2141 we use helper functions to fix up the memory references after the
2142 fact with the appropriate offsets to a POST_MODIFY memory mode.
2143 The following data structure tracks the state of the two iterators
2144 while insns are being emitted. */
2146 struct spill_fill_data
2148 rtx init_after; /* point at which to emit initializations */
2149 rtx init_reg[2]; /* initial base register */
2150 rtx iter_reg[2]; /* the iterator registers */
2151 rtx *prev_addr[2]; /* address of last memory use */
2152 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2153 HOST_WIDE_INT prev_off[2]; /* last offset */
2154 int n_iter; /* number of iterators in use */
2155 int next_iter; /* next iterator to use */
2156 unsigned int save_gr_used_mask;
2159 static struct spill_fill_data spill_fill_data;
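/* Illustrative sketch of the interleaving.  With n_iter == 2, the first
   spill is addressed through iter_reg[0] and the second through
   iter_reg[1]; when the third spill comes back to iterator 0,
   spill_restore_mem rewrites the first spill's address into a POST_MODIFY
   that steps iter_reg[0] forward to the third spill's slot.  Consecutive
   spills thus use alternating base registers, letting two memory
   references issue in one insn group.  */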
2161 static void
2162 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2164 int i;
2166 spill_fill_data.init_after = get_last_insn ();
2167 spill_fill_data.init_reg[0] = init_reg;
2168 spill_fill_data.init_reg[1] = init_reg;
2169 spill_fill_data.prev_addr[0] = NULL;
2170 spill_fill_data.prev_addr[1] = NULL;
2171 spill_fill_data.prev_insn[0] = NULL;
2172 spill_fill_data.prev_insn[1] = NULL;
2173 spill_fill_data.prev_off[0] = cfa_off;
2174 spill_fill_data.prev_off[1] = cfa_off;
2175 spill_fill_data.next_iter = 0;
2176 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2178 spill_fill_data.n_iter = 1 + (n_spills > 2);
2179 for (i = 0; i < spill_fill_data.n_iter; ++i)
2181 int regno = next_scratch_gr_reg ();
2182 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2183 current_frame_info.gr_used_mask |= 1 << regno;
2187 static void
2188 finish_spill_pointers (void)
2190 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2193 static rtx
2194 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2196 int iter = spill_fill_data.next_iter;
2197 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2198 rtx disp_rtx = GEN_INT (disp);
2199 rtx mem;
2201 if (spill_fill_data.prev_addr[iter])
2203 if (CONST_OK_FOR_N (disp))
2205 *spill_fill_data.prev_addr[iter]
2206 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2207 gen_rtx_PLUS (DImode,
2208 spill_fill_data.iter_reg[iter],
2209 disp_rtx));
2210 REG_NOTES (spill_fill_data.prev_insn[iter])
2211 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2212 REG_NOTES (spill_fill_data.prev_insn[iter]));
2214 else
2216 /* ??? Could use register post_modify for loads. */
2217 if (! CONST_OK_FOR_I (disp))
2219 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2220 emit_move_insn (tmp, disp_rtx);
2221 disp_rtx = tmp;
2223 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2224 spill_fill_data.iter_reg[iter], disp_rtx));
2227 /* Micro-optimization: if we've created a frame pointer, it's at
2228 CFA 0, which may allow the real iterator to be initialized lower,
2229 slightly increasing parallelism. Also, if there are few saves
2230 it may eliminate the iterator entirely. */
2231 else if (disp == 0
2232 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2233 && frame_pointer_needed)
2235 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2236 set_mem_alias_set (mem, get_varargs_alias_set ());
2237 return mem;
2239 else
2241 rtx seq, insn;
2243 if (disp == 0)
2244 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2245 spill_fill_data.init_reg[iter]);
2246 else
2248 start_sequence ();
2250 if (! CONST_OK_FOR_I (disp))
2252 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2253 emit_move_insn (tmp, disp_rtx);
2254 disp_rtx = tmp;
2257 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2258 spill_fill_data.init_reg[iter],
2259 disp_rtx));
2261 seq = get_insns ();
2262 end_sequence ();
2265 /* Careful for being the first insn in a sequence. */
2266 if (spill_fill_data.init_after)
2267 insn = emit_insn_after (seq, spill_fill_data.init_after);
2268 else
2270 rtx first = get_insns ();
2271 if (first)
2272 insn = emit_insn_before (seq, first);
2273 else
2274 insn = emit_insn (seq);
2276 spill_fill_data.init_after = insn;
2278 /* If DISP is 0, we may or may not have a further adjustment
2279 afterward. If we do, then the load/store insn may be modified
2280 to be a post-modify. If we don't, then this copy may be
2281 eliminated by copyprop_hardreg_forward, which makes this
2282 insn garbage, which runs afoul of the sanity check in
2283 propagate_one_insn. So mark this insn as legal to delete. */
2284 if (disp == 0)
2285 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2286 REG_NOTES (insn));
2289 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2291 /* ??? Not all of the spills are for varargs, but some of them are.
2292 The rest of the spills belong in an alias set of their own. But
2293 it doesn't actually hurt to include them here. */
2294 set_mem_alias_set (mem, get_varargs_alias_set ());
2296 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2297 spill_fill_data.prev_off[iter] = cfa_off;
2299 if (++iter >= spill_fill_data.n_iter)
2300 iter = 0;
2301 spill_fill_data.next_iter = iter;
2303 return mem;
2306 static void
2307 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2308 rtx frame_reg)
2310 int iter = spill_fill_data.next_iter;
2311 rtx mem, insn;
2313 mem = spill_restore_mem (reg, cfa_off);
2314 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2315 spill_fill_data.prev_insn[iter] = insn;
2317 if (frame_reg)
2319 rtx base;
2320 HOST_WIDE_INT off;
2322 RTX_FRAME_RELATED_P (insn) = 1;
2324 /* Don't even pretend that the unwind code can intuit its way
2325 through a pair of interleaved post_modify iterators. Just
2326 provide the correct answer. */
2328 if (frame_pointer_needed)
2330 base = hard_frame_pointer_rtx;
2331 off = - cfa_off;
2333 else
2335 base = stack_pointer_rtx;
2336 off = current_frame_info.total_size - cfa_off;
2339 REG_NOTES (insn)
2340 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2341 gen_rtx_SET (VOIDmode,
2342 gen_rtx_MEM (GET_MODE (reg),
2343 plus_constant (base, off)),
2344 frame_reg),
2345 REG_NOTES (insn));
2349 static void
2350 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2352 int iter = spill_fill_data.next_iter;
2353 rtx insn;
2355 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2356 GEN_INT (cfa_off)));
2357 spill_fill_data.prev_insn[iter] = insn;
2360 /* Wrapper functions that discard the CONST_INT spill offset. These
2361 exist so that we can give gr_spill/gr_fill the offset they need and
2362 use a consistent function interface. */
2364 static rtx
2365 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2367 return gen_movdi (dest, src);
2370 static rtx
2371 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2373 return gen_fr_spill (dest, src);
2376 static rtx
2377 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2379 return gen_fr_restore (dest, src);
2382 /* Called after register allocation to add any instructions needed for the
2383 prologue. Using a prologue insn is favored compared to putting all of the
2384 instructions in output_function_prologue(), since it allows the scheduler
2385 to intermix instructions with the saves of the caller saved registers. In
2386 some cases, it might be necessary to emit a barrier instruction as the last
2387 insn to prevent such scheduling.
2389 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2390 so that the debug info generation code can handle them properly.
2392 The register save area is laid out like so:
2393 cfa+16
2394 [ varargs spill area ]
2395 [ fr register spill area ]
2396 [ br register spill area ]
2397 [ ar register spill area ]
2398 [ pr register spill area ]
2399 [ gr register spill area ] */
2401 /* ??? Get inefficient code when the frame size is larger than can fit in an
2402 adds instruction. */
2404 void
2405 ia64_expand_prologue (void)
2407 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2408 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2409 rtx reg, alt_reg;
2411 ia64_compute_frame_size (get_frame_size ());
2412 last_scratch_gr_reg = 15;
2414 /* If there is no epilogue, then we don't need some prologue insns.
2415 We need to avoid emitting the dead prologue insns, because flow
2416 will complain about them. */
2417 if (optimize)
2419 edge e;
2421 for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
2422 if ((e->flags & EDGE_FAKE) == 0
2423 && (e->flags & EDGE_FALLTHRU) != 0)
2424 break;
2425 epilogue_p = (e != NULL);
2427 else
2428 epilogue_p = 1;
2430 /* Set the local, input, and output register names. We need to do this
2431 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2432 half. If we use in/loc/out register names, then we get assembler errors
2433 in crtn.S because there is no alloc insn or regstk directive in there. */
2434 if (! TARGET_REG_NAMES)
2436 int inputs = current_frame_info.n_input_regs;
2437 int locals = current_frame_info.n_local_regs;
2438 int outputs = current_frame_info.n_output_regs;
2440 for (i = 0; i < inputs; i++)
2441 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2442 for (i = 0; i < locals; i++)
2443 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2444 for (i = 0; i < outputs; i++)
2445 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2448 /* Set the frame pointer register name. The regnum is logically loc79,
2449 but of course we'll not have allocated that many locals. Rather than
2450 worrying about renumbering the existing rtxs, we adjust the name. */
2451 /* ??? This code means that we can never use one local register when
2452 there is a frame pointer. loc79 gets wasted in this case, as it is
2453 renamed to a register that will never be used. See also the try_locals
2454 code in find_gr_spill. */
2455 if (current_frame_info.reg_fp)
2457 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2458 reg_names[HARD_FRAME_POINTER_REGNUM]
2459 = reg_names[current_frame_info.reg_fp];
2460 reg_names[current_frame_info.reg_fp] = tmp;
2463 /* We don't need an alloc instruction if we've used no outputs or locals. */
2464 if (current_frame_info.n_local_regs == 0
2465 && current_frame_info.n_output_regs == 0
2466 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2467 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2469 /* If there is no alloc, but there are input registers used, then we
2470 need a .regstk directive. */
2471 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2472 ar_pfs_save_reg = NULL_RTX;
2474 else
2476 current_frame_info.need_regstk = 0;
2478 if (current_frame_info.reg_save_ar_pfs)
2479 regno = current_frame_info.reg_save_ar_pfs;
2480 else
2481 regno = next_scratch_gr_reg ();
2482 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2484 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2485 GEN_INT (current_frame_info.n_input_regs),
2486 GEN_INT (current_frame_info.n_local_regs),
2487 GEN_INT (current_frame_info.n_output_regs),
2488 GEN_INT (current_frame_info.n_rotate_regs)));
2489 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2492 /* Set up frame pointer, stack pointer, and spill iterators. */
2494 n_varargs = cfun->machine->n_varargs;
2495 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2496 stack_pointer_rtx, 0);
2498 if (frame_pointer_needed)
2500 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2501 RTX_FRAME_RELATED_P (insn) = 1;
2504 if (current_frame_info.total_size != 0)
2506 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2507 rtx offset;
2509 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2510 offset = frame_size_rtx;
2511 else
2513 regno = next_scratch_gr_reg ();
2514 offset = gen_rtx_REG (DImode, regno);
2515 emit_move_insn (offset, frame_size_rtx);
2518 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2519 stack_pointer_rtx, offset));
2521 if (! frame_pointer_needed)
2523 RTX_FRAME_RELATED_P (insn) = 1;
2524 if (GET_CODE (offset) != CONST_INT)
2526 REG_NOTES (insn)
2527 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2528 gen_rtx_SET (VOIDmode,
2529 stack_pointer_rtx,
2530 gen_rtx_PLUS (DImode,
2531 stack_pointer_rtx,
2532 frame_size_rtx)),
2533 REG_NOTES (insn));
2537 /* ??? At this point we must generate a magic insn that appears to
2538 modify the stack pointer, the frame pointer, and all spill
2539 iterators. This would allow the most scheduling freedom. For
2540 now, just hard stop. */
2541 emit_insn (gen_blockage ());
2544 /* Must copy out ar.unat before doing any integer spills. */
2545 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2547 if (current_frame_info.reg_save_ar_unat)
2548 ar_unat_save_reg
2549 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2550 else
2552 alt_regno = next_scratch_gr_reg ();
2553 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2554 current_frame_info.gr_used_mask |= 1 << alt_regno;
2557 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2558 insn = emit_move_insn (ar_unat_save_reg, reg);
2559 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2561 /* Even if we're not going to generate an epilogue, we still
2562 need to save the register so that EH works. */
2563 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2564 emit_insn (gen_prologue_use (ar_unat_save_reg));
2566 else
2567 ar_unat_save_reg = NULL_RTX;
2569 /* Spill all varargs registers. Do this before spilling any GR registers,
2570 since we want the UNAT bits for the GR registers to override the UNAT
2571 bits from varargs, which we don't care about. */
2573 cfa_off = -16;
2574 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2576 reg = gen_rtx_REG (DImode, regno);
2577 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2580 /* Locate the bottom of the register save area. */
2581 cfa_off = (current_frame_info.spill_cfa_off
2582 + current_frame_info.spill_size
2583 + current_frame_info.extra_spill_size);
2585 /* Save the predicate register block either in a register or in memory. */
2586 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2588 reg = gen_rtx_REG (DImode, PR_REG (0));
2589 if (current_frame_info.reg_save_pr != 0)
2591 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2592 insn = emit_move_insn (alt_reg, reg);
2594 /* ??? Denote pr spill/fill by a DImode move that modifies all
2595 64 hard registers. */
2596 RTX_FRAME_RELATED_P (insn) = 1;
2597 REG_NOTES (insn)
2598 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2599 gen_rtx_SET (VOIDmode, alt_reg, reg),
2600 REG_NOTES (insn));
2602 /* Even if we're not going to generate an epilogue, we still
2603 need to save the register so that EH works. */
2604 if (! epilogue_p)
2605 emit_insn (gen_prologue_use (alt_reg));
2607 else
2609 alt_regno = next_scratch_gr_reg ();
2610 alt_reg = gen_rtx_REG (DImode, alt_regno);
2611 insn = emit_move_insn (alt_reg, reg);
2612 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2613 cfa_off -= 8;
2617 /* Handle AR regs in numerical order. All of them get special handling. */
2618 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2619 && current_frame_info.reg_save_ar_unat == 0)
2621 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2622 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2623 cfa_off -= 8;
2626 /* The alloc insn already copied ar.pfs into a general register. The
2627 only thing we have to do now is copy that register to a stack slot
2628 if we'd not allocated a local register for the job. */
2629 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2630 && current_frame_info.reg_save_ar_pfs == 0)
2632 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2633 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2634 cfa_off -= 8;
2637 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2639 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2640 if (current_frame_info.reg_save_ar_lc != 0)
2642 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2643 insn = emit_move_insn (alt_reg, reg);
2644 RTX_FRAME_RELATED_P (insn) = 1;
2646 /* Even if we're not going to generate an epilogue, we still
2647 need to save the register so that EH works. */
2648 if (! epilogue_p)
2649 emit_insn (gen_prologue_use (alt_reg));
2651 else
2653 alt_regno = next_scratch_gr_reg ();
2654 alt_reg = gen_rtx_REG (DImode, alt_regno);
2655 emit_move_insn (alt_reg, reg);
2656 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2657 cfa_off -= 8;
2661 if (current_frame_info.reg_save_gp)
2663 insn = emit_move_insn (gen_rtx_REG (DImode,
2664 current_frame_info.reg_save_gp),
2665 pic_offset_table_rtx);
2666 /* We don't know for sure yet if this is actually needed, since
2667 we've not split the PIC call patterns. If all of the calls
2668 are indirect, and not followed by any uses of the gp, then
2669 this save is dead. Allow it to go away. */
2670 REG_NOTES (insn)
2671 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2674 /* We should now be at the base of the gr/br/fr spill area. */
2675 if (cfa_off != (current_frame_info.spill_cfa_off
2676 + current_frame_info.spill_size))
2677 abort ();
2679 /* Spill all general registers. */
2680 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2681 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2683 reg = gen_rtx_REG (DImode, regno);
2684 do_spill (gen_gr_spill, reg, cfa_off, reg);
2685 cfa_off -= 8;
2688 /* Handle BR0 specially -- it may be getting stored permanently in
2689 some GR register. */
2690 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2692 reg = gen_rtx_REG (DImode, BR_REG (0));
2693 if (current_frame_info.reg_save_b0 != 0)
2695 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2696 insn = emit_move_insn (alt_reg, reg);
2697 RTX_FRAME_RELATED_P (insn) = 1;
2699 /* Even if we're not going to generate an epilogue, we still
2700 need to save the register so that EH works. */
2701 if (! epilogue_p)
2702 emit_insn (gen_prologue_use (alt_reg));
2704 else
2706 alt_regno = next_scratch_gr_reg ();
2707 alt_reg = gen_rtx_REG (DImode, alt_regno);
2708 emit_move_insn (alt_reg, reg);
2709 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2710 cfa_off -= 8;
2714 /* Spill the rest of the BR registers. */
2715 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2716 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2718 alt_regno = next_scratch_gr_reg ();
2719 alt_reg = gen_rtx_REG (DImode, alt_regno);
2720 reg = gen_rtx_REG (DImode, regno);
2721 emit_move_insn (alt_reg, reg);
2722 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2723 cfa_off -= 8;
2726 /* Align the frame and spill all FR registers. */
2727 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2728 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2730 if (cfa_off & 15)
2731 abort ();
2732 reg = gen_rtx_REG (XFmode, regno);
2733 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2734 cfa_off -= 16;
2737 if (cfa_off != current_frame_info.spill_cfa_off)
2738 abort ();
2740 finish_spill_pointers ();
2743 /* Called after register allocation to add any instructions needed for the
2744 epilogue. Using an epilogue insn is favored compared to putting all of the
2745 instructions in output_function_epilogue(), since it allows the scheduler
2746 to intermix instructions with the saves of the caller saved registers. In
2747 some cases, it might be necessary to emit a barrier instruction as the last
2748 insn to prevent such scheduling. */
2750 void
2751 ia64_expand_epilogue (int sibcall_p)
2753 rtx insn, reg, alt_reg, ar_unat_save_reg;
2754 int regno, alt_regno, cfa_off;
2756 ia64_compute_frame_size (get_frame_size ());
2758 /* If there is a frame pointer, then we use it instead of the stack
2759 pointer, so that the stack pointer does not need to be valid when
2760 the epilogue starts. See EXIT_IGNORE_STACK. */
2761 if (frame_pointer_needed)
2762 setup_spill_pointers (current_frame_info.n_spilled,
2763 hard_frame_pointer_rtx, 0);
2764 else
2765 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2766 current_frame_info.total_size);
2768 if (current_frame_info.total_size != 0)
2770 /* ??? At this point we must generate a magic insn that appears to
2771 modify the spill iterators and the frame pointer. This would
2772 allow the most scheduling freedom. For now, just hard stop. */
2773 emit_insn (gen_blockage ());
2776 /* Locate the bottom of the register save area. */
2777 cfa_off = (current_frame_info.spill_cfa_off
2778 + current_frame_info.spill_size
2779 + current_frame_info.extra_spill_size);
2781 /* Restore the predicate registers. */
2782 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2784 if (current_frame_info.reg_save_pr != 0)
2785 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2786 else
2788 alt_regno = next_scratch_gr_reg ();
2789 alt_reg = gen_rtx_REG (DImode, alt_regno);
2790 do_restore (gen_movdi_x, alt_reg, cfa_off);
2791 cfa_off -= 8;
2793 reg = gen_rtx_REG (DImode, PR_REG (0));
2794 emit_move_insn (reg, alt_reg);
2797 /* Restore the application registers. */
2799 /* Load the saved unat from the stack, but do not restore it until
2800 after the GRs have been restored. */
2801 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2803 if (current_frame_info.reg_save_ar_unat != 0)
2804 ar_unat_save_reg
2805 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2806 else
2808 alt_regno = next_scratch_gr_reg ();
2809 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2810 current_frame_info.gr_used_mask |= 1 << alt_regno;
2811 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2812 cfa_off -= 8;
2815 else
2816 ar_unat_save_reg = NULL_RTX;
2818 if (current_frame_info.reg_save_ar_pfs != 0)
2820 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2821 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2822 emit_move_insn (reg, alt_reg);
2824 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2826 alt_regno = next_scratch_gr_reg ();
2827 alt_reg = gen_rtx_REG (DImode, alt_regno);
2828 do_restore (gen_movdi_x, alt_reg, cfa_off);
2829 cfa_off -= 8;
2830 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2831 emit_move_insn (reg, alt_reg);
2834 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2836 if (current_frame_info.reg_save_ar_lc != 0)
2837 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2838 else
2840 alt_regno = next_scratch_gr_reg ();
2841 alt_reg = gen_rtx_REG (DImode, alt_regno);
2842 do_restore (gen_movdi_x, alt_reg, cfa_off);
2843 cfa_off -= 8;
2845 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2846 emit_move_insn (reg, alt_reg);
2849 /* We should now be at the base of the gr/br/fr spill area. */
2850 if (cfa_off != (current_frame_info.spill_cfa_off
2851 + current_frame_info.spill_size))
2852 abort ();
2854 /* The GP may be stored on the stack in the prologue, but it's
2855 never restored in the epilogue. Skip the stack slot. */
2856 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2857 cfa_off -= 8;
2859 /* Restore all general registers. */
2860 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2861 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2863 reg = gen_rtx_REG (DImode, regno);
2864 do_restore (gen_gr_restore, reg, cfa_off);
2865 cfa_off -= 8;
2868 /* Restore the branch registers. Handle B0 specially, as it may
2869 have gotten stored in some GR register. */
2870 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2872 if (current_frame_info.reg_save_b0 != 0)
2873 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2874 else
2876 alt_regno = next_scratch_gr_reg ();
2877 alt_reg = gen_rtx_REG (DImode, alt_regno);
2878 do_restore (gen_movdi_x, alt_reg, cfa_off);
2879 cfa_off -= 8;
2881 reg = gen_rtx_REG (DImode, BR_REG (0));
2882 emit_move_insn (reg, alt_reg);
2885 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2886 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2888 alt_regno = next_scratch_gr_reg ();
2889 alt_reg = gen_rtx_REG (DImode, alt_regno);
2890 do_restore (gen_movdi_x, alt_reg, cfa_off);
2891 cfa_off -= 8;
2892 reg = gen_rtx_REG (DImode, regno);
2893 emit_move_insn (reg, alt_reg);
2896 /* Restore floating point registers. */
2897 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2898 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2900 if (cfa_off & 15)
2901 abort ();
2902 reg = gen_rtx_REG (XFmode, regno);
2903 do_restore (gen_fr_restore_x, reg, cfa_off);
2904 cfa_off -= 16;
2907 /* Restore ar.unat for real. */
2908 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2910 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2911 emit_move_insn (reg, ar_unat_save_reg);
2914 if (cfa_off != current_frame_info.spill_cfa_off)
2915 abort ();
2917 finish_spill_pointers ();
2919 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2921 /* ??? At this point we must generate a magic insn that appears to
2922 modify the spill iterators, the stack pointer, and the frame
2923 pointer. This would allow the most scheduling freedom. For now,
2924 just hard stop. */
2925 emit_insn (gen_blockage ());
2928 if (cfun->machine->ia64_eh_epilogue_sp)
2929 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2930 else if (frame_pointer_needed)
2932 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2933 RTX_FRAME_RELATED_P (insn) = 1;
2935 else if (current_frame_info.total_size)
2937 rtx offset, frame_size_rtx;
2939 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2940 if (CONST_OK_FOR_I (current_frame_info.total_size))
2941 offset = frame_size_rtx;
2942 else
2944 regno = next_scratch_gr_reg ();
2945 offset = gen_rtx_REG (DImode, regno);
2946 emit_move_insn (offset, frame_size_rtx);
2949 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2950 offset));
2952 RTX_FRAME_RELATED_P (insn) = 1;
2953 if (GET_CODE (offset) != CONST_INT)
2955 REG_NOTES (insn)
2956 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2957 gen_rtx_SET (VOIDmode,
2958 stack_pointer_rtx,
2959 gen_rtx_PLUS (DImode,
2960 stack_pointer_rtx,
2961 frame_size_rtx)),
2962 REG_NOTES (insn));
2966 if (cfun->machine->ia64_eh_epilogue_bsp)
2967 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2969 if (! sibcall_p)
2970 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2971 else
2973 int fp = GR_REG (2);
2974 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
2975 first available call-clobbered register. If there was a frame_pointer
2976 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2977 so we have to make sure we're using the string "r2" when emitting
2978 the register name for the assembler. */
2979 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2980 fp = HARD_FRAME_POINTER_REGNUM;
2982 /* We must emit an alloc to force the input registers to become output
2983 registers. Otherwise, if the callee tries to pass its parameters
2984 through to another call without an intervening alloc, then these
2985 values get lost. */
2986 /* ??? We don't need to preserve all input registers. We only need to
2987 preserve those input registers used as arguments to the sibling call.
2988 It is unclear how to compute that number here. */
2989 if (current_frame_info.n_input_regs != 0)
2990 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2991 GEN_INT (0), GEN_INT (0),
2992 GEN_INT (current_frame_info.n_input_regs),
2993 GEN_INT (0)));
2997 /* Return 1 if br.ret can do all the work required to return from a
2998 function. */
3001 ia64_direct_return (void)
3003 if (reload_completed && ! frame_pointer_needed)
3005 ia64_compute_frame_size (get_frame_size ());
3007 return (current_frame_info.total_size == 0
3008 && current_frame_info.n_spilled == 0
3009 && current_frame_info.reg_save_b0 == 0
3010 && current_frame_info.reg_save_pr == 0
3011 && current_frame_info.reg_save_ar_pfs == 0
3012 && current_frame_info.reg_save_ar_unat == 0
3013 && current_frame_info.reg_save_ar_lc == 0);
3015 return 0;
3018 /* Return the magic cookie that we use to hold the return address
3019 during early compilation. */
3022 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3024 if (count != 0)
3025 return NULL;
3026 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3029 /* Split this value after reload, now that we know where the return
3030 address is saved. */
3032 void
3033 ia64_split_return_addr_rtx (rtx dest)
3035 rtx src;
3037 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3039 if (current_frame_info.reg_save_b0 != 0)
3040 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3041 else
3043 HOST_WIDE_INT off;
3044 unsigned int regno;
3046 /* Compute offset from CFA for BR0. */
3047 /* ??? Must be kept in sync with ia64_expand_prologue. */
3048 off = (current_frame_info.spill_cfa_off
3049 + current_frame_info.spill_size);
3050 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3051 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3052 off -= 8;
3054 /* Convert CFA offset to a register based offset. */
3055 if (frame_pointer_needed)
3056 src = hard_frame_pointer_rtx;
3057 else
3059 src = stack_pointer_rtx;
3060 off += current_frame_info.total_size;
3063 /* Load address into scratch register. */
3064 if (CONST_OK_FOR_I (off))
3065 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3066 else
3068 emit_move_insn (dest, GEN_INT (off));
3069 emit_insn (gen_adddi3 (dest, src, dest));
3072 src = gen_rtx_MEM (Pmode, dest);
3075 else
3076 src = gen_rtx_REG (DImode, BR_REG (0));
3078 emit_move_insn (dest, src);
3082 ia64_hard_regno_rename_ok (int from, int to)
3084 /* Don't clobber any of the registers we reserved for the prologue. */
3085 if (to == current_frame_info.reg_fp
3086 || to == current_frame_info.reg_save_b0
3087 || to == current_frame_info.reg_save_pr
3088 || to == current_frame_info.reg_save_ar_pfs
3089 || to == current_frame_info.reg_save_ar_unat
3090 || to == current_frame_info.reg_save_ar_lc)
3091 return 0;
3093 if (from == current_frame_info.reg_fp
3094 || from == current_frame_info.reg_save_b0
3095 || from == current_frame_info.reg_save_pr
3096 || from == current_frame_info.reg_save_ar_pfs
3097 || from == current_frame_info.reg_save_ar_unat
3098 || from == current_frame_info.reg_save_ar_lc)
3099 return 0;
3101 /* Don't use output registers outside the register frame. */
3102 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3103 return 0;
3105 /* Retain even/oddness on predicate register pairs. */
3106 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3107 return (from & 1) == (to & 1);
3109 return 1;
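/* For example, the parity check above allows renaming p6 to p8 (both even)
   but rejects p6 to p7, since 6 and 7 differ in their low bit.  */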
3112 /* Target hook for assembling integer objects. Handle word-sized
3113 aligned objects and detect the cases when @fptr is needed. */
3115 static bool
3116 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3118 if (size == POINTER_SIZE / BITS_PER_UNIT
3119 && aligned_p
3120 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3121 && GET_CODE (x) == SYMBOL_REF
3122 && SYMBOL_REF_FUNCTION_P (x))
3124 if (POINTER_SIZE == 32)
3125 fputs ("\tdata4\t@fptr(", asm_out_file);
3126 else
3127 fputs ("\tdata8\t@fptr(", asm_out_file);
3128 output_addr_const (asm_out_file, x);
3129 fputs (")\n", asm_out_file);
3130 return true;
3132 return default_assemble_integer (x, size, aligned_p);
3135 /* Emit the function prologue. */
3137 static void
3138 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3140 int mask, grsave, grsave_prev;
3142 if (current_frame_info.need_regstk)
3143 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3144 current_frame_info.n_input_regs,
3145 current_frame_info.n_local_regs,
3146 current_frame_info.n_output_regs,
3147 current_frame_info.n_rotate_regs);
3149 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3150 return;
3152 /* Emit the .prologue directive. */
3154 mask = 0;
3155 grsave = grsave_prev = 0;
3156 if (current_frame_info.reg_save_b0 != 0)
3158 mask |= 8;
3159 grsave = grsave_prev = current_frame_info.reg_save_b0;
3161 if (current_frame_info.reg_save_ar_pfs != 0
3162 && (grsave_prev == 0
3163 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3165 mask |= 4;
3166 if (grsave_prev == 0)
3167 grsave = current_frame_info.reg_save_ar_pfs;
3168 grsave_prev = current_frame_info.reg_save_ar_pfs;
3170 if (current_frame_info.reg_fp != 0
3171 && (grsave_prev == 0
3172 || current_frame_info.reg_fp == grsave_prev + 1))
3174 mask |= 2;
3175 if (grsave_prev == 0)
3176 grsave = HARD_FRAME_POINTER_REGNUM;
3177 grsave_prev = current_frame_info.reg_fp;
3179 if (current_frame_info.reg_save_pr != 0
3180 && (grsave_prev == 0
3181 || current_frame_info.reg_save_pr == grsave_prev + 1))
3183 mask |= 1;
3184 if (grsave_prev == 0)
3185 grsave = current_frame_info.reg_save_pr;
3188 if (mask && TARGET_GNU_AS)
3189 fprintf (file, "\t.prologue %d, %d\n", mask,
3190 ia64_dbx_register_number (grsave));
3191 else
3192 fputs ("\t.prologue\n", file);
3194 /* Emit a .spill directive, if necessary, to relocate the base of
3195 the register spill area. */
3196 if (current_frame_info.spill_cfa_off != -16)
3197 fprintf (file, "\t.spill %ld\n",
3198 (long) (current_frame_info.spill_cfa_off
3199 + current_frame_info.spill_size));
3202 /* Emit the .body directive at the scheduled end of the prologue. */
3204 static void
3205 ia64_output_function_end_prologue (FILE *file)
3207 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3208 return;
3210 fputs ("\t.body\n", file);
3213 /* Emit the function epilogue. */
3215 static void
3216 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3217 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3219 int i;
3221 if (current_frame_info.reg_fp)
3223 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3224 reg_names[HARD_FRAME_POINTER_REGNUM]
3225 = reg_names[current_frame_info.reg_fp];
3226 reg_names[current_frame_info.reg_fp] = tmp;
3228 if (! TARGET_REG_NAMES)
3230 for (i = 0; i < current_frame_info.n_input_regs; i++)
3231 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3232 for (i = 0; i < current_frame_info.n_local_regs; i++)
3233 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3234 for (i = 0; i < current_frame_info.n_output_regs; i++)
3235 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3238 current_frame_info.initialized = 0;
3242 ia64_dbx_register_number (int regno)
3244 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3245 from its home at loc79 to something inside the register frame. We
3246 must perform the same renumbering here for the debug info. */
3247 if (current_frame_info.reg_fp)
3249 if (regno == HARD_FRAME_POINTER_REGNUM)
3250 regno = current_frame_info.reg_fp;
3251 else if (regno == current_frame_info.reg_fp)
3252 regno = HARD_FRAME_POINTER_REGNUM;
3255 if (IN_REGNO_P (regno))
3256 return 32 + regno - IN_REG (0);
3257 else if (LOC_REGNO_P (regno))
3258 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3259 else if (OUT_REGNO_P (regno))
3260 return (32 + current_frame_info.n_input_regs
3261 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3262 else
3263 return regno;
3266 void
3267 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3269 rtx addr_reg, eight = GEN_INT (8);
3271 /* The Intel assembler requires that the global __ia64_trampoline symbol
3272 be declared explicitly. */
3273 if (!TARGET_GNU_AS)
3275 static bool declared_ia64_trampoline = false;
3277 if (!declared_ia64_trampoline)
3279 declared_ia64_trampoline = true;
3280 (*targetm.asm_out.globalize_label) (asm_out_file,
3281 "__ia64_trampoline");
3285 /* Load up our iterator. */
3286 addr_reg = gen_reg_rtx (Pmode);
3287 emit_move_insn (addr_reg, addr);
3289 /* The first two words are the fake descriptor:
3290 __ia64_trampoline, ADDR+16. */
3291 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3292 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3293 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3295 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3296 copy_to_reg (plus_constant (addr, 16)));
3297 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3299 /* The third word is the target descriptor. */
3300 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3301 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3303 /* The fourth word is the static chain. */
3304 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
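/* Illustrative layout of the resulting trampoline, assuming 8-byte words:

     ADDR +  0:  address of __ia64_trampoline
     ADDR +  8:  ADDR + 16   (second word of the fake descriptor)
     ADDR + 16:  FNADDR      (the target descriptor)
     ADDR + 24:  STATIC_CHAIN  */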
3307 /* Do any needed setup for a variadic function. CUM has not been updated
3308 for the last named argument which has type TYPE and mode MODE.
3310 We generate the actual spill instructions during prologue generation. */
3312 void
3313 ia64_setup_incoming_varargs (CUMULATIVE_ARGS cum, int int_mode, tree type,
3314 int * pretend_size,
3315 int second_time ATTRIBUTE_UNUSED)
3317 /* Skip the current argument. */
3318 ia64_function_arg_advance (&cum, int_mode, type, 1);
3320 if (cum.words < MAX_ARGUMENT_SLOTS)
3322 int n = MAX_ARGUMENT_SLOTS - cum.words;
3323 *pretend_size = n * UNITS_PER_WORD;
3324 cfun->machine->n_varargs = n;
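/* Illustrative example: if the named arguments occupy the first three of
   the eight argument slots, then n = 8 - 3 = 5, *pretend_size becomes
   5 * UNITS_PER_WORD bytes, and the prologue later spills the remaining
   five argument registers into that area.  */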
3328 /* Check whether TYPE is a homogeneous floating point aggregate. If
3329 it is, return the mode of the floating point type that appears
3330 in all leafs. If it is not, return VOIDmode.
3332 An aggregate is a homogeneous floating point aggregate if all
3333 fields/elements in it have the same floating point type (e.g.,
3334 SFmode). 128-bit quad-precision floats are excluded. */
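/* A few illustrative cases, following the logic below:

     struct { float x, y, z; }      -> SFmode   (HFA of floats)
     struct { double d[4]; }        -> DFmode   (HFA of doubles)
     struct { float f; double d; }  -> VOIDmode (mixed element types)
     _Complex double                -> DFmode   (non-integral complex)
     any TFmode (quad) member       -> VOIDmode (quad is excluded)  */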
3336 static enum machine_mode
3337 hfa_element_mode (tree type, int nested)
3339 enum machine_mode element_mode = VOIDmode;
3340 enum machine_mode mode;
3341 enum tree_code code = TREE_CODE (type);
3342 int know_element_mode = 0;
3343 tree t;
3345 switch (code)
3347 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3348 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3349 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3350 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
3351 case FUNCTION_TYPE:
3352 return VOIDmode;
3354 /* Fortran complex types are supposed to be HFAs, so we need to handle
3355 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3356 types though. */
3357 case COMPLEX_TYPE:
3358 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3359 && TYPE_MODE (type) != TCmode)
3360 return GET_MODE_INNER (TYPE_MODE (type));
3361 else
3362 return VOIDmode;
3364 case REAL_TYPE:
3365 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3366 mode if this is contained within an aggregate. */
3367 if (nested && TYPE_MODE (type) != TFmode)
3368 return TYPE_MODE (type);
3369 else
3370 return VOIDmode;
3372 case ARRAY_TYPE:
3373 return hfa_element_mode (TREE_TYPE (type), 1);
3375 case RECORD_TYPE:
3376 case UNION_TYPE:
3377 case QUAL_UNION_TYPE:
3378 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3380 if (TREE_CODE (t) != FIELD_DECL)
3381 continue;
3383 mode = hfa_element_mode (TREE_TYPE (t), 1);
3384 if (know_element_mode)
3386 if (mode != element_mode)
3387 return VOIDmode;
3389 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3390 return VOIDmode;
3391 else
3393 know_element_mode = 1;
3394 element_mode = mode;
3397 return element_mode;
3399 default:
3400 /* If we reach here, we probably have some front-end specific type
3401 that the backend doesn't know about. This can happen via the
3402 aggregate_value_p call in init_function_start. All we can do is
3403 ignore unknown tree types. */
3404 return VOIDmode;
3407 return VOIDmode;
3410 /* Return rtx for register where argument is passed, or zero if it is passed
3411 on the stack. */
3413 /* ??? 128-bit quad-precision floats are always passed in general
3414 registers. */
3417 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3418 int named, int incoming)
3420 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3421 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3422 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3423 / UNITS_PER_WORD);
3424 int offset = 0;
3425 enum machine_mode hfa_mode = VOIDmode;
3427 /* Integer and float arguments larger than 8 bytes start at the next even
3428 boundary. Aggregates larger than 8 bytes start at the next even boundary
3429 if the aggregate has 16 byte alignment. Net effect is that types with
3430 alignment greater than 8 start at the next even boundary. */
3431 /* ??? The ABI does not specify how to handle aggregates with alignment from
3432 9 to 15 bytes, or greater than 16. We handle them all as if they had
3433 16 byte alignment. Such aggregates can occur only if gcc extensions are
3434 used. */
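/* Illustrative example: a 16-byte-aligned aggregate arriving when
   cum->words is odd (say 3) gets offset = 1 and so starts in slot 4
   rather than slot 3; when cum->words is even, offset stays 0.  */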
3435 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3436 : (words > 1))
3437 && (cum->words & 1))
3438 offset = 1;
3440 /* If all argument slots are used, then it must go on the stack. */
3441 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3442 return 0;
3444 /* Check for and handle homogeneous FP aggregates. */
3445 if (type)
3446 hfa_mode = hfa_element_mode (type, 0);
3448 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3449 and unprototyped hfas are passed specially. */
3450 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3452 rtx loc[16];
3453 int i = 0;
3454 int fp_regs = cum->fp_regs;
3455 int int_regs = cum->words + offset;
3456 int hfa_size = GET_MODE_SIZE (hfa_mode);
3457 int byte_size;
3458 int args_byte_size;
3460 /* If prototyped, pass it in FR regs then GR regs.
3461 If not prototyped, pass it in both FR and GR regs.
3463 If this is an SFmode aggregate, then it is possible to run out of
3464 FR regs while GR regs are still left. In that case, we pass the
3465 remaining part in the GR regs. */
3467 /* Fill the FP regs. We do this always. We stop if we reach the end
3468 of the argument, the last FP register, or the last argument slot. */
3470 byte_size = ((mode == BLKmode)
3471 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3472 args_byte_size = int_regs * UNITS_PER_WORD;
3473 offset = 0;
3474 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3475 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3477 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3478 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3479 + fp_regs)),
3480 GEN_INT (offset));
3481 offset += hfa_size;
3482 args_byte_size += hfa_size;
3483 fp_regs++;
3486 /* If no prototype, then the whole thing must go in GR regs. */
3487 if (! cum->prototype)
3488 offset = 0;
3489 /* If this is an SFmode aggregate, then we might have some left over
3490 that needs to go in GR regs. */
3491 else if (byte_size != offset)
3492 int_regs += offset / UNITS_PER_WORD;
3494 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3496 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3498 enum machine_mode gr_mode = DImode;
3500 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3501 then this goes in a GR reg left adjusted/little endian, right
3502 adjusted/big endian. */
3503 /* ??? Currently this is handled wrong, because 4-byte hunks are
3504 always right adjusted/little endian. */
3505 if (offset & 0x4)
3506 gr_mode = SImode;
3507 /* If we have an even 4 byte hunk because the aggregate is a
3508 multiple of 4 bytes in size, then this goes in a GR reg right
3509 adjusted/little endian. */
3510 else if (byte_size - offset == 4)
3511 gr_mode = SImode;
3512 /* Complex floats need to have float mode. */
3513 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3514 gr_mode = hfa_mode;
3516 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3517 gen_rtx_REG (gr_mode, (basereg
3518 + int_regs)),
3519 GEN_INT (offset));
3520 offset += GET_MODE_SIZE (gr_mode);
3521 int_regs += GET_MODE_SIZE (gr_mode) <= UNITS_PER_WORD
3522 ? 1 : GET_MODE_SIZE (gr_mode) / UNITS_PER_WORD;
3525 /* If we ended up using just one location, just return that one loc, but
3526 change the mode back to the argument mode. */
3527 if (i == 1)
3528 return gen_rtx_REG (mode, REGNO (XEXP (loc[0], 0)));
3529 else
3530 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3533 /* Integral and aggregates go in general registers. If we have run out of
3534 FR registers, then FP values must also go in general registers. This can
3535 happen when we have a SFmode HFA. */
3536 else if (mode == TFmode || mode == TCmode
3537 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3539 int byte_size = ((mode == BLKmode)
3540 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3541 if (BYTES_BIG_ENDIAN
3542 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3543 && byte_size < UNITS_PER_WORD
3544 && byte_size > 0)
3546 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3547 gen_rtx_REG (DImode,
3548 (basereg + cum->words
3549 + offset)),
3550 const0_rtx);
3551 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3553 else
3554 return gen_rtx_REG (mode, basereg + cum->words + offset);
3558 /* If there is a prototype, then FP values go in a FR register when
3559 named, and in a GR register when unnamed. */
3560 else if (cum->prototype)
3562 if (! named)
3563 return gen_rtx_REG (mode, basereg + cum->words + offset);
3564 else
3565 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3567 /* If there is no prototype, then FP values go in both FR and GR
3568 registers. */
3569 else
3571 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3572 gen_rtx_REG (mode, (FR_ARG_FIRST
3573 + cum->fp_regs)),
3574 const0_rtx);
3575 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3576 gen_rtx_REG (mode,
3577 (basereg + cum->words
3578 + offset)),
3579 const0_rtx);
3581 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3585 /* Return number of words, at the beginning of the argument, that must be
3586 put in registers. 0 if the argument is entirely in registers or entirely
3587 in memory. */
3590 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3591 tree type, int named ATTRIBUTE_UNUSED)
3593 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3594 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3595 / UNITS_PER_WORD);
3596 int offset = 0;
3598 /* Arguments with alignment larger than 8 bytes start at the next even
3599 boundary. */
3600 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3601 : (words > 1))
3602 && (cum->words & 1))
3603 offset = 1;
3605 /* If all argument slots are used, then it must go on the stack. */
3606 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3607 return 0;
3609 /* It doesn't matter whether the argument goes in FR or GR regs. If
3610 it fits within the 8 argument slots, then it goes entirely in
3611 registers. If it extends past the last argument slot, then the rest
3612 goes on the stack. */
3614 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3615 return 0;
3617 return MAX_ARGUMENT_SLOTS - cum->words - offset;
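/* Illustrative example: a 4-word argument arriving with cum->words == 6
   and no alignment padding returns 8 - 6 - 0 = 2, i.e. two words are
   passed in registers and the remaining two words go on the stack.  */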
3620 /* Update CUM to point after this argument. This is patterned after
3621 ia64_function_arg. */
3623 void
3624 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3625 tree type, int named)
3627 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3628 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3629 / UNITS_PER_WORD);
3630 int offset = 0;
3631 enum machine_mode hfa_mode = VOIDmode;
3633 /* If all arg slots are already full, then there is nothing to do. */
3634 if (cum->words >= MAX_ARGUMENT_SLOTS)
3635 return;
3637 /* Arguments with alignment larger than 8 bytes start at the next even
3638 boundary. */
3639 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3640 : (words > 1))
3641 && (cum->words & 1))
3642 offset = 1;
3644 cum->words += words + offset;
3646 /* Check for and handle homogeneous FP aggregates. */
3647 if (type)
3648 hfa_mode = hfa_element_mode (type, 0);
3650 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3651 and unprototyped hfas are passed specially. */
3652 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3654 int fp_regs = cum->fp_regs;
3655 /* This is the original value of cum->words + offset. */
3656 int int_regs = cum->words - words;
3657 int hfa_size = GET_MODE_SIZE (hfa_mode);
3658 int byte_size;
3659 int args_byte_size;
3661 /* If prototyped, pass it in FR regs then GR regs.
3662 If not prototyped, pass it in both FR and GR regs.
3664 If this is an SFmode aggregate, then it is possible to run out of
3665 FR regs while GR regs are still left. In that case, we pass the
3666 remaining part in the GR regs. */
3668 /* Fill the FP regs. We do this always. We stop if we reach the end
3669 of the argument, the last FP register, or the last argument slot. */
3671 byte_size = ((mode == BLKmode)
3672 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3673 args_byte_size = int_regs * UNITS_PER_WORD;
3674 offset = 0;
3675 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3676 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3678 offset += hfa_size;
3679 args_byte_size += hfa_size;
3680 fp_regs++;
3683 cum->fp_regs = fp_regs;
3686 /* Integral and aggregates go in general registers. If we have run out of
3687 FR registers, then FP values must also go in general registers. This can
3688 happen when we have a SFmode HFA. */
3689 else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
3690 cum->int_regs = cum->words;
3692 /* If there is a prototype, then FP values go in a FR register when
3693 named, and in a GR register when unnamed. */
3694 else if (cum->prototype)
3696 if (! named)
3697 cum->int_regs = cum->words;
3698 else
3699 /* ??? Complex types should not reach here. */
3700 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3702 /* If there is no prototype, then FP values go in both FR and GR
3703 registers. */
3704 else
3706 /* ??? Complex types should not reach here. */
3707 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3708 cum->int_regs = cum->words;
3712 /* Variable sized types are passed by reference. */
3713 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3716 ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3717 enum machine_mode mode ATTRIBUTE_UNUSED,
3718 tree type, int named ATTRIBUTE_UNUSED)
3720 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3723 /* True if it is OK to do sibling call optimization for the specified
3724 call expression EXP. DECL will be the called function, or NULL if
3725 this is an indirect call. */
3726 static bool
3727 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3729 /* We must always return with our current GP. This means we can
3730 only sibcall to functions defined in the current module. */
3731 return decl && (*targetm.binds_local_p) (decl);
3735 /* Implement va_arg. */
3738 ia64_va_arg (tree valist, tree type)
3740 tree t;
3742 /* Variable sized types are passed by reference. */
3743 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
3745 rtx addr = force_reg (ptr_mode,
3746 std_expand_builtin_va_arg (valist, build_pointer_type (type)));
3747 #ifdef POINTERS_EXTEND_UNSIGNED
3748 addr = convert_memory_address (Pmode, addr);
3749 #endif
3750 return gen_rtx_MEM (ptr_mode, addr);
3753 /* Arguments with alignment larger than 8 bytes start at the next even
3754 boundary. */
3755 if (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3757 t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3758 build_int_2 (2 * UNITS_PER_WORD - 1, 0));
3759 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3760 build_int_2 (-2 * UNITS_PER_WORD, -1));
3761 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3762 TREE_SIDE_EFFECTS (t) = 1;
3763 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
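/* Assuming UNITS_PER_WORD is 8, the three tree operations above amount to
   the pointer arithmetic

     valist = (valist + 15) & -16;

   i.e. rounding the argument pointer up to the next 16-byte (even slot)
   boundary, mirroring ia64_function_arg_advance.  */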
3766 return std_expand_builtin_va_arg (valist, type);
3769 /* Return 1 if the function return value is returned in memory. Return 0 if
3770 it is in a register. */
3773 ia64_return_in_memory (tree valtype)
3775 enum machine_mode mode;
3776 enum machine_mode hfa_mode;
3777 HOST_WIDE_INT byte_size;
3779 mode = TYPE_MODE (valtype);
3780 byte_size = GET_MODE_SIZE (mode);
3781 if (mode == BLKmode)
3783 byte_size = int_size_in_bytes (valtype);
3784 if (byte_size < 0)
3785 return 1;
3788 /* HFAs with up to 8 elements are returned in the FP argument registers. */
3790 hfa_mode = hfa_element_mode (valtype, 0);
3791 if (hfa_mode != VOIDmode)
3793 int hfa_size = GET_MODE_SIZE (hfa_mode);
3795 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3796 return 1;
3797 else
3798 return 0;
3800 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3801 return 1;
3802 else
3803 return 0;
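/* For example, a struct of eight doubles is an HFA with
   byte_size / hfa_size == 8 and is returned in FP registers, while a
   struct of nine doubles is returned in memory.  A non-HFA aggregate is
   returned in memory once it exceeds UNITS_PER_WORD * MAX_INT_RETURN_SLOTS
   bytes, i.e. 32 bytes with the usual IA-64 values of 8 and 4.  */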
3806 /* Return rtx for register that holds the function return value. */
3809 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3811 enum machine_mode mode;
3812 enum machine_mode hfa_mode;
3814 mode = TYPE_MODE (valtype);
3815 hfa_mode = hfa_element_mode (valtype, 0);
3817 if (hfa_mode != VOIDmode)
3819 rtx loc[8];
3820 int i;
3821 int hfa_size;
3822 int byte_size;
3823 int offset;
3825 hfa_size = GET_MODE_SIZE (hfa_mode);
3826 byte_size = ((mode == BLKmode)
3827 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3828 offset = 0;
3829 for (i = 0; offset < byte_size; i++)
3831 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3832 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3833 GEN_INT (offset));
3834 offset += hfa_size;
3837 if (i == 1)
3838 return XEXP (loc[0], 0);
3839 else
3840 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3842 else if (FLOAT_TYPE_P (valtype) && mode != TFmode)
3843 return gen_rtx_REG (mode, FR_ARG_FIRST);
3844 else
3846 if (BYTES_BIG_ENDIAN
3847 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3849 rtx loc[8];
3850 int offset;
3851 int bytesize;
3852 int i;
3854 offset = 0;
3855 bytesize = int_size_in_bytes (valtype);
3856 for (i = 0; offset < bytesize; i++)
3858 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3859 gen_rtx_REG (DImode,
3860 GR_RET_FIRST + i),
3861 GEN_INT (offset));
3862 offset += UNITS_PER_WORD;
3864 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3866 else
3867 return gen_rtx_REG (mode, GR_RET_FIRST);
3871 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3872 We need to emit DTP-relative relocations. */
3874 void
3875 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3877 if (size != 8)
3878 abort ();
3879 fputs ("\tdata8.ua\t@dtprel(", file);
3880 output_addr_const (file, x);
3881 fputs (")", file);
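/* For a symbol `foo' and size 8 this emits

     data8.ua @dtprel(foo)

   i.e. an unaligned 8-byte datum holding foo's DTP-relative (thread-local
   block) offset, resolved by the assembler and linker.  */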
3884 /* Print a memory address as an operand to reference that memory location. */
3886 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3887 also call this from ia64_print_operand for memory addresses. */
3889 void
3890 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3891 rtx address ATTRIBUTE_UNUSED)
3895 /* Print an operand to an assembler instruction.
3896 C Swap and print a comparison operator.
3897 D Print an FP comparison operator.
3898 E Print 32 - constant, for SImode shifts as extract.
3899 e Print 64 - constant, for DImode rotates.
3900 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3901 a floating point register emitted normally.
3902 I Invert a predicate register by adding 1.
3903 J Select the proper predicate register for a condition.
3904 j Select the inverse predicate register for a condition.
3905 O Append .acq for volatile load.
3906 P Postincrement of a MEM.
3907 Q Append .rel for volatile store.
3908 S Shift amount for shladd instruction.
3909 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3910 for Intel assembler.
3911 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3912 for Intel assembler.
3913 r Print register name, or constant 0 as r0. HP compatibility for
3914 Linux kernel. */
3915 void
3916 ia64_print_operand (FILE * file, rtx x, int code)
3918 const char *str;
3920 switch (code)
3922 case 0:
3923 /* Handled below. */
3924 break;
3926 case 'C':
3928 enum rtx_code c = swap_condition (GET_CODE (x));
3929 fputs (GET_RTX_NAME (c), file);
3930 return;
3933 case 'D':
3934 switch (GET_CODE (x))
3936 case NE:
3937 str = "neq";
3938 break;
3939 case UNORDERED:
3940 str = "unord";
3941 break;
3942 case ORDERED:
3943 str = "ord";
3944 break;
3945 default:
3946 str = GET_RTX_NAME (GET_CODE (x));
3947 break;
3949 fputs (str, file);
3950 return;
3952 case 'E':
3953 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3954 return;
3956 case 'e':
3957 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3958 return;
3960 case 'F':
3961 if (x == CONST0_RTX (GET_MODE (x)))
3962 str = reg_names [FR_REG (0)];
3963 else if (x == CONST1_RTX (GET_MODE (x)))
3964 str = reg_names [FR_REG (1)];
3965 else if (GET_CODE (x) == REG)
3966 str = reg_names [REGNO (x)];
3967 else
3968 abort ();
3969 fputs (str, file);
3970 return;
3972 case 'I':
3973 fputs (reg_names [REGNO (x) + 1], file);
3974 return;
3976 case 'J':
3977 case 'j':
3979 unsigned int regno = REGNO (XEXP (x, 0));
3980 if (GET_CODE (x) == EQ)
3981 regno += 1;
3982 if (code == 'j')
3983 regno ^= 1;
3984 fputs (reg_names [regno], file);
3986 return;
3988 case 'O':
3989 if (MEM_VOLATILE_P (x))
3990 fputs(".acq", file);
3991 return;
3993 case 'P':
3995 HOST_WIDE_INT value;
3997 switch (GET_CODE (XEXP (x, 0)))
3999 default:
4000 return;
4002 case POST_MODIFY:
4003 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4004 if (GET_CODE (x) == CONST_INT)
4005 value = INTVAL (x);
4006 else if (GET_CODE (x) == REG)
4008 fprintf (file, ", %s", reg_names[REGNO (x)]);
4009 return;
4011 else
4012 abort ();
4013 break;
4015 case POST_INC:
4016 value = GET_MODE_SIZE (GET_MODE (x));
4017 break;
4019 case POST_DEC:
4020 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4021 break;
4024 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4025 return;
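/* For example, a POST_INC of an 8-byte access prints ", 8" and a POST_DEC
   prints ", -8", so together with the MEM operand itself the output reads
   roughly like

     ld8 r8 = [r14], 8

   which is the base-register-update form of the IA-64 loads and stores.  */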
4028 case 'Q':
4029 if (MEM_VOLATILE_P (x))
4030 fputs(".rel", file);
4031 return;
4033 case 'S':
4034 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4035 return;
4037 case 'T':
4038 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4040 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4041 return;
4043 break;
4045 case 'U':
4046 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4048 const char *prefix = "0x";
4049 if (INTVAL (x) & 0x80000000)
4051 fprintf (file, "0xffffffff");
4052 prefix = "";
4054 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4055 return;
4057 break;
4059 case 'r':
4060 /* If this operand is the constant zero, write it as register zero.
4061 Any register, zero, or CONST_INT value is OK here. */
4062 if (GET_CODE (x) == REG)
4063 fputs (reg_names[REGNO (x)], file);
4064 else if (x == CONST0_RTX (GET_MODE (x)))
4065 fputs ("r0", file);
4066 else if (GET_CODE (x) == CONST_INT)
4067 output_addr_const (file, x);
4068 else
4069 output_operand_lossage ("invalid %%r value");
4070 return;
4072 case '+':
4074 const char *which;
4076 /* For conditional branches, returns or calls, substitute
4077 sptk, dptk, dpnt, or spnt for %+. */
4078 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4079 if (x)
4081 int pred_val = INTVAL (XEXP (x, 0));
4083 /* Guess top and bottom 2% statically predicted. */
4084 if (pred_val < REG_BR_PROB_BASE / 50)
4085 which = ".spnt";
4086 else if (pred_val < REG_BR_PROB_BASE / 2)
4087 which = ".dpnt";
4088 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4089 which = ".dptk";
4090 else
4091 which = ".sptk";
4093 else if (GET_CODE (current_output_insn) == CALL_INSN)
4094 which = ".sptk";
4095 else
4096 which = ".dptk";
4098 fputs (which, file);
4099 return;
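/* Under the thresholds above, a branch the profile says is taken 99% of
   the time is annotated ".sptk" and one taken 60% of the time ".dptk",
   yielding mnemonics roughly like

     br.cond.sptk .L5

   (a sketch; the exact spelling comes from the insn templates).  */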
4102 case ',':
4103 x = current_insn_predicate;
4104 if (x)
4106 unsigned int regno = REGNO (XEXP (x, 0));
4107 if (GET_CODE (x) == EQ)
4108 regno += 1;
4109 fprintf (file, "(%s) ", reg_names [regno]);
4111 return;
4113 default:
4114 output_operand_lossage ("ia64_print_operand: unknown code");
4115 return;
4118 switch (GET_CODE (x))
4120 /* This happens for the spill/restore instructions. */
4121 case POST_INC:
4122 case POST_DEC:
4123 case POST_MODIFY:
4124 x = XEXP (x, 0);
4125 /* ... fall through ... */
4127 case REG:
4128 fputs (reg_names [REGNO (x)], file);
4129 break;
4131 case MEM:
4133 rtx addr = XEXP (x, 0);
4134 if (GET_RTX_CLASS (GET_CODE (addr)) == 'a')
4135 addr = XEXP (addr, 0);
4136 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4137 break;
4140 default:
4141 output_addr_const (file, x);
4142 break;
4145 return;
4148 /* Compute a (partial) cost for rtx X. Return true if the complete
4149 cost has been computed, and false if subexpressions should be
4150 scanned. In either case, *TOTAL contains the cost result. */
4151 /* ??? This is incomplete. */
4153 static bool
4154 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4156 switch (code)
4158 case CONST_INT:
4159 switch (outer_code)
4161 case SET:
4162 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4163 return true;
4164 case PLUS:
4165 if (CONST_OK_FOR_I (INTVAL (x)))
4166 *total = 0;
4167 else if (CONST_OK_FOR_J (INTVAL (x)))
4168 *total = 1;
4169 else
4170 *total = COSTS_N_INSNS (1);
4171 return true;
4172 default:
4173 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4174 *total = 0;
4175 else
4176 *total = COSTS_N_INSNS (1);
4177 return true;
4180 case CONST_DOUBLE:
4181 *total = COSTS_N_INSNS (1);
4182 return true;
4184 case CONST:
4185 case SYMBOL_REF:
4186 case LABEL_REF:
4187 *total = COSTS_N_INSNS (3);
4188 return true;
4190 case MULT:
4191 /* For multiplies wider than HImode, we have to go to the FPU,
4192 which normally involves copies. Plus there's the latency
4193 of the multiply itself, and the latency of the instructions to
4194 transfer integer regs to FP regs. */
4195 /* ??? Check for FP mode. */
4196 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4197 *total = COSTS_N_INSNS (10);
4198 else
4199 *total = COSTS_N_INSNS (2);
4200 return true;
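/* A DImode multiply typically expands to a sequence roughly like

     setf.sig f6 = r32
     setf.sig f7 = r33
     xma.l    f6 = f6, f7, f0
     getf.sig r8 = f6

   hence the cost of 10 rather than 2; this is only a sketch, the real
   sequence comes from the move and multiply patterns in ia64.md.  */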
4202 case PLUS:
4203 case MINUS:
4204 case ASHIFT:
4205 case ASHIFTRT:
4206 case LSHIFTRT:
4207 *total = COSTS_N_INSNS (1);
4208 return true;
4210 case DIV:
4211 case UDIV:
4212 case MOD:
4213 case UMOD:
4214 /* We make divide expensive, so that divide-by-constant will be
4215 optimized to a multiply. */
4216 *total = COSTS_N_INSNS (60);
4217 return true;
4219 default:
4220 return false;
4224 /* Calculate the cost of moving data from a register in class FROM to
4225 one in class TO, using MODE. */
4228 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4229 enum reg_class to)
4231 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4232 if (to == ADDL_REGS)
4233 to = GR_REGS;
4234 if (from == ADDL_REGS)
4235 from = GR_REGS;
4237 /* All costs are symmetric, so reduce cases by putting the
4238 lower number class as the destination. */
4239 if (from < to)
4241 enum reg_class tmp = to;
4242 to = from, from = tmp;
4245 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4246 so that we get secondary memory reloads. Between FR_REGS,
4247 we have to make this at least as expensive as MEMORY_MOVE_COST
4248 to avoid spectacularly poor register class preferencing. */
4249 if (mode == XFmode)
4251 if (to != GR_REGS || from != GR_REGS)
4252 return MEMORY_MOVE_COST (mode, to, 0);
4253 else
4254 return 3;
4257 switch (to)
4259 case PR_REGS:
4260 /* Moving between PR registers takes two insns. */
4261 if (from == PR_REGS)
4262 return 3;
4263 /* Moving between PR and anything but GR is impossible. */
4264 if (from != GR_REGS)
4265 return MEMORY_MOVE_COST (mode, to, 0);
4266 break;
4268 case BR_REGS:
4269 /* Moving between BR and anything but GR is impossible. */
4270 if (from != GR_REGS && from != GR_AND_BR_REGS)
4271 return MEMORY_MOVE_COST (mode, to, 0);
4272 break;
4274 case AR_I_REGS:
4275 case AR_M_REGS:
4276 /* Moving between AR and anything but GR is impossible. */
4277 if (from != GR_REGS)
4278 return MEMORY_MOVE_COST (mode, to, 0);
4279 break;
4281 case GR_REGS:
4282 case FR_REGS:
4283 case GR_AND_FR_REGS:
4284 case GR_AND_BR_REGS:
4285 case ALL_REGS:
4286 break;
4288 default:
4289 abort ();
4292 return 2;
4295 /* This function returns the register class required for a secondary
4296 register when copying between one of the registers in CLASS, and X,
4297 using MODE. A return value of NO_REGS means that no secondary register
4298 is required. */
4300 enum reg_class
4301 ia64_secondary_reload_class (enum reg_class class,
4302 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4304 int regno = -1;
4306 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4307 regno = true_regnum (x);
4309 switch (class)
4311 case BR_REGS:
4312 case AR_M_REGS:
4313 case AR_I_REGS:
4314 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4315 interaction. We end up with two pseudos with overlapping lifetimes
4316 both of which are equiv to the same constant, and both of which need
4317 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4318 changes depending on the path length, which means the qty_first_reg
4319 check in make_regs_eqv can give different answers at different times.
4320 At some point I'll probably need a reload_indi pattern to handle
4321 this.
4323 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4324 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4325 non-general registers for good measure. */
4326 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4327 return GR_REGS;
4329 /* This is needed if a pseudo used as a call_operand gets spilled to a
4330 stack slot. */
4331 if (GET_CODE (x) == MEM)
4332 return GR_REGS;
4333 break;
4335 case FR_REGS:
4336 /* Need to go through general registers to get to other class regs. */
4337 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4338 return GR_REGS;
4340 /* This can happen when a paradoxical subreg is an operand to the
4341 muldi3 pattern. */
4342 /* ??? This shouldn't be necessary after instruction scheduling is
4343 enabled, because paradoxical subregs are not accepted by
4344 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4345 stop the paradoxical subreg stupidity in the *_operand functions
4346 in recog.c. */
4347 if (GET_CODE (x) == MEM
4348 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4349 || GET_MODE (x) == QImode))
4350 return GR_REGS;
4352 /* This can happen because of the ior/and/etc patterns that accept FP
4353 registers as operands. If the third operand is a constant, then it
4354 needs to be reloaded into a FP register. */
4355 if (GET_CODE (x) == CONST_INT)
4356 return GR_REGS;
4358 /* This can happen because of register elimination in a muldi3 insn.
4359 E.g. `26107 * (unsigned long)&u'. */
4360 if (GET_CODE (x) == PLUS)
4361 return GR_REGS;
4362 break;
4364 case PR_REGS:
4365 /* ??? This happens if we cse/gcse a BImode value across a call,
4366 and the function has a nonlocal goto. This is because global
4367 does not allocate call crossing pseudos to hard registers when
4368 current_function_has_nonlocal_goto is true. This is relatively
4369 common for C++ programs that use exceptions. To reproduce,
4370 return NO_REGS and compile libstdc++. */
4371 if (GET_CODE (x) == MEM)
4372 return GR_REGS;
4374 /* This can happen when we take a BImode subreg of a DImode value,
4375 and that DImode value winds up in some non-GR register. */
4376 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4377 return GR_REGS;
4378 break;
4380 case GR_REGS:
4381 /* Since we have no offsettable memory addresses, we need a temporary
4382 to hold the address of the second word. */
4383 if (mode == TImode)
4384 return GR_REGS;
4385 break;
4387 default:
4388 break;
4391 return NO_REGS;
4395 /* Emit text to declare externally defined variables and functions, because
4396 the Intel assembler does not support undefined externals. */
4398 void
4399 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4401 int save_referenced;
4403 /* GNU as does not need anything here, but the HP linker does need
4404 something for external functions. */
4406 if (TARGET_GNU_AS
4407 && (!TARGET_HPUX_LD
4408 || TREE_CODE (decl) != FUNCTION_DECL
4409 || strstr (name, "__builtin_") == name))
4410 return;
4412 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4413 the linker when we do this, so we need to be careful not to do this for
4414 builtin functions which have no library equivalent. Unfortunately, we
4415 can't tell here whether or not a function will actually be called by
4416 expand_expr, so we pull in library functions even if we may not need
4417 them later. */
4418 if (! strcmp (name, "__builtin_next_arg")
4419 || ! strcmp (name, "alloca")
4420 || ! strcmp (name, "__builtin_constant_p")
4421 || ! strcmp (name, "__builtin_args_info"))
4422 return;
4424 if (TARGET_HPUX_LD)
4425 ia64_hpux_add_extern_decl (name);
4426 else
4428 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4429 restore it. */
4430 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4431 if (TREE_CODE (decl) == FUNCTION_DECL)
4432 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4433 (*targetm.asm_out.globalize_label) (file, name);
4434 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4438 /* Parse the -mfixed-range= option string. */
4440 static void
4441 fix_range (const char *const_str)
4443 int i, first, last;
4444 char *str, *dash, *comma;
4446 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
4447 REG2 are either register names or register numbers. The effect
4448 of this option is to mark the registers in the range from REG1 to
4449 REG2 as ``fixed'' so they won't be used by the compiler. This is
4450 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
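/* For example, both of

     -mfixed-range=f32-f127
     -mfixed-range=f12-f15,f32-f127

   follow the REG1-REG2{,REG1-REG2} form parsed below.  */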
4452 i = strlen (const_str);
4453 str = (char *) alloca (i + 1);
4454 memcpy (str, const_str, i + 1);
4456 while (1)
4458 dash = strchr (str, '-');
4459 if (!dash)
4461 warning ("value of -mfixed-range must have form REG1-REG2");
4462 return;
4464 *dash = '\0';
4466 comma = strchr (dash + 1, ',');
4467 if (comma)
4468 *comma = '\0';
4470 first = decode_reg_name (str);
4471 if (first < 0)
4473 warning ("unknown register name: %s", str);
4474 return;
4477 last = decode_reg_name (dash + 1);
4478 if (last < 0)
4480 warning ("unknown register name: %s", dash + 1);
4481 return;
4484 *dash = '-';
4486 if (first > last)
4488 warning ("%s-%s is an empty range", str, dash + 1);
4489 return;
4492 for (i = first; i <= last; ++i)
4493 fixed_regs[i] = call_used_regs[i] = 1;
4495 if (!comma)
4496 break;
4498 *comma = ',';
4499 str = comma + 1;
4503 static struct machine_function *
4504 ia64_init_machine_status (void)
4506 return ggc_alloc_cleared (sizeof (struct machine_function));
4509 /* Handle TARGET_OPTIONS switches. */
4511 void
4512 ia64_override_options (void)
4514 static struct pta
4516 const char *const name; /* processor name or nickname. */
4517 const enum processor_type processor;
4519 const processor_alias_table[] =
4521 {"itanium", PROCESSOR_ITANIUM},
4522 {"itanium1", PROCESSOR_ITANIUM},
4523 {"merced", PROCESSOR_ITANIUM},
4524 {"itanium2", PROCESSOR_ITANIUM2},
4525 {"mckinley", PROCESSOR_ITANIUM2},
4528 int const pta_size = ARRAY_SIZE (processor_alias_table);
4529 int i;
4531 if (TARGET_AUTO_PIC)
4532 target_flags |= MASK_CONST_GP;
4534 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4536 warning ("cannot optimize floating point division for both latency and throughput");
4537 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4540 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4542 warning ("cannot optimize integer division for both latency and throughput");
4543 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4546 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4548 warning ("cannot optimize square root for both latency and throughput");
4549 target_flags &= ~MASK_INLINE_SQRT_THR;
4552 if (TARGET_INLINE_SQRT_LAT)
4554 warning ("not yet implemented: latency-optimized inline square root");
4555 target_flags &= ~MASK_INLINE_SQRT_LAT;
4558 if (ia64_fixed_range_string)
4559 fix_range (ia64_fixed_range_string);
4561 if (ia64_tls_size_string)
4563 char *end;
4564 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4565 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4566 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4567 else
4568 ia64_tls_size = tmp;
4571 if (!ia64_tune_string)
4572 ia64_tune_string = "itanium2";
4574 for (i = 0; i < pta_size; i++)
4575 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4577 ia64_tune = processor_alias_table[i].processor;
4578 break;
4581 if (i == pta_size)
4582 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4584 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4585 flag_schedule_insns_after_reload = 0;
4587 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4589 init_machine_status = ia64_init_machine_status;
4592 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4593 static enum attr_type ia64_safe_type (rtx);
4595 static enum attr_itanium_class
4596 ia64_safe_itanium_class (rtx insn)
4598 if (recog_memoized (insn) >= 0)
4599 return get_attr_itanium_class (insn);
4600 else
4601 return ITANIUM_CLASS_UNKNOWN;
4604 static enum attr_type
4605 ia64_safe_type (rtx insn)
4607 if (recog_memoized (insn) >= 0)
4608 return get_attr_type (insn);
4609 else
4610 return TYPE_UNKNOWN;
4613 /* The following collection of routines emit instruction group stop bits as
4614 necessary to avoid dependencies. */
4616 /* Need to track some additional registers as far as serialization is
4617 concerned so we can properly handle br.call and br.ret. We could
4618 make these registers visible to gcc, but since these registers are
4619 never explicitly used in gcc generated code, it seems wasteful to
4620 do so (plus it would make the call and return patterns needlessly
4621 complex). */
4622 #define REG_GP (GR_REG (1))
4623 #define REG_RP (BR_REG (0))
4624 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4625 /* This is used for volatile asms which may require a stop bit immediately
4626 before and after them. */
4627 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4628 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4629 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4631 /* For each register, we keep track of how it has been written in the
4632 current instruction group.
4634 If a register is written unconditionally (no qualifying predicate),
4635 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4637 If a register is written if its qualifying predicate P is true, we
4638 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4639 may be written again by the complement of P (P^1) and when this happens,
4640 WRITE_COUNT gets set to 2.
4642 The result of this is that whenever an insn attempts to write a register
4643 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4645 If a predicate register is written by a floating-point insn, we set
4646 WRITTEN_BY_FP to true.
4648 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4649 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
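/* As an illustration, assuming p6 and p7 were written by a single compare
   and therefore form a complementary pair:

     (p6) mov r8 = 1    // write_count becomes 1, first_pred is p6
     (p7) mov r8 = 2    // complement of p6, ok; write_count becomes 2
          mov r8 = 3    // write_count already 2, needs a stop bit (;;) first

   The third write makes rws_access_regno report a dependency and a group
   barrier is emitted before it.  */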
4651 struct reg_write_state
4653 unsigned int write_count : 2;
4654 unsigned int first_pred : 16;
4655 unsigned int written_by_fp : 1;
4656 unsigned int written_by_and : 1;
4657 unsigned int written_by_or : 1;
4660 /* Cumulative info for the current instruction group. */
4661 struct reg_write_state rws_sum[NUM_REGS];
4662 /* Info for the current instruction. This gets copied to rws_sum after a
4663 stop bit is emitted. */
4664 struct reg_write_state rws_insn[NUM_REGS];
4666 /* Indicates whether this is the first instruction after a stop bit,
4667 in which case we don't need another stop bit. Without this, we hit
4668 the abort in ia64_variable_issue when scheduling an alloc. */
4669 static int first_instruction;
4671 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4672 RTL for one instruction. */
4673 struct reg_flags
4675 unsigned int is_write : 1; /* Is register being written? */
4676 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4677 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4678 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4679 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4680 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4683 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4684 static int rws_access_regno (int, struct reg_flags, int);
4685 static int rws_access_reg (rtx, struct reg_flags, int);
4686 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4687 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4688 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4689 static void init_insn_group_barriers (void);
4690 static int group_barrier_needed_p (rtx);
4691 static int safe_group_barrier_needed_p (rtx);
4693 /* Update *RWS for REGNO, which is being written by the current instruction,
4694 with predicate PRED, and associated register flags in FLAGS. */
4696 static void
4697 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4699 if (pred)
4700 rws[regno].write_count++;
4701 else
4702 rws[regno].write_count = 2;
4703 rws[regno].written_by_fp |= flags.is_fp;
4704 /* ??? Not tracking and/or across differing predicates. */
4705 rws[regno].written_by_and = flags.is_and;
4706 rws[regno].written_by_or = flags.is_or;
4707 rws[regno].first_pred = pred;
4710 /* Handle an access to register REGNO of type FLAGS using predicate register
4711 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4712 a dependency with an earlier instruction in the same group. */
4714 static int
4715 rws_access_regno (int regno, struct reg_flags flags, int pred)
4717 int need_barrier = 0;
4719 if (regno >= NUM_REGS)
4720 abort ();
4722 if (! PR_REGNO_P (regno))
4723 flags.is_and = flags.is_or = 0;
4725 if (flags.is_write)
4727 int write_count;
4729 /* One insn writes same reg multiple times? */
4730 if (rws_insn[regno].write_count > 0)
4731 abort ();
4733 /* Update info for current instruction. */
4734 rws_update (rws_insn, regno, flags, pred);
4735 write_count = rws_sum[regno].write_count;
4737 switch (write_count)
4739 case 0:
4740 /* The register has not been written yet. */
4741 rws_update (rws_sum, regno, flags, pred);
4742 break;
4744 case 1:
4745 /* The register has been written via a predicate. If this is
4746 not a complementary predicate, then we need a barrier. */
4747 /* ??? This assumes that P and P+1 are always complementary
4748 predicates for P even. */
4749 if (flags.is_and && rws_sum[regno].written_by_and)
4751 else if (flags.is_or && rws_sum[regno].written_by_or)
4753 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4754 need_barrier = 1;
4755 rws_update (rws_sum, regno, flags, pred);
4756 break;
4758 case 2:
4759 /* The register has been unconditionally written already. We
4760 need a barrier. */
4761 if (flags.is_and && rws_sum[regno].written_by_and)
4763 else if (flags.is_or && rws_sum[regno].written_by_or)
4765 else
4766 need_barrier = 1;
4767 rws_sum[regno].written_by_and = flags.is_and;
4768 rws_sum[regno].written_by_or = flags.is_or;
4769 break;
4771 default:
4772 abort ();
4775 else
4777 if (flags.is_branch)
4779 /* Branches have several RAW exceptions that allow us to avoid
4780 barriers. */
4782 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4783 /* RAW dependencies on branch regs are permissible as long
4784 as the writer is a non-branch instruction. Since we
4785 never generate code that uses a branch register written
4786 by a branch instruction, handling this case is
4787 easy. */
4788 return 0;
4790 if (REGNO_REG_CLASS (regno) == PR_REGS
4791 && ! rws_sum[regno].written_by_fp)
4792 /* The predicates of a branch are available within the
4793 same insn group as long as the predicate was written by
4794 something other than a floating-point instruction. */
4795 return 0;
4798 if (flags.is_and && rws_sum[regno].written_by_and)
4799 return 0;
4800 if (flags.is_or && rws_sum[regno].written_by_or)
4801 return 0;
4803 switch (rws_sum[regno].write_count)
4805 case 0:
4806 /* The register has not been written yet. */
4807 break;
4809 case 1:
4810 /* The register has been written via a predicate. If this is
4811 not a complementary predicate, then we need a barrier. */
4812 /* ??? This assumes that P and P+1 are always complementary
4813 predicates for P even. */
4814 if ((rws_sum[regno].first_pred ^ 1) != pred)
4815 need_barrier = 1;
4816 break;
4818 case 2:
4819 /* The register has been unconditionally written already. We
4820 need a barrier. */
4821 need_barrier = 1;
4822 break;
4824 default:
4825 abort ();
4829 return need_barrier;
4832 static int
4833 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4835 int regno = REGNO (reg);
4836 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4838 if (n == 1)
4839 return rws_access_regno (regno, flags, pred);
4840 else
4842 int need_barrier = 0;
4843 while (--n >= 0)
4844 need_barrier |= rws_access_regno (regno + n, flags, pred);
4845 return need_barrier;
4849 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4850 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4852 static void
4853 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4855 rtx src = SET_SRC (x);
4857 *pcond = 0;
4859 switch (GET_CODE (src))
4861 case CALL:
4862 return;
4864 case IF_THEN_ELSE:
4865 if (SET_DEST (x) == pc_rtx)
4866 /* X is a conditional branch. */
4867 return;
4868 else
4870 int is_complemented = 0;
4872 /* X is a conditional move. */
4873 rtx cond = XEXP (src, 0);
4874 if (GET_CODE (cond) == EQ)
4875 is_complemented = 1;
4876 cond = XEXP (cond, 0);
4877 if (GET_CODE (cond) != REG
4878 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4879 abort ();
4880 *pcond = cond;
4881 if (XEXP (src, 1) == SET_DEST (x)
4882 || XEXP (src, 2) == SET_DEST (x))
4884 /* X is a conditional move that conditionally writes the
4885 destination. */
4887 /* We need another complement in this case. */
4888 if (XEXP (src, 1) == SET_DEST (x))
4889 is_complemented = ! is_complemented;
4891 *ppred = REGNO (cond);
4892 if (is_complemented)
4893 ++*ppred;
4896 /* ??? If this is a conditional write to the dest, then this
4897 instruction does not actually read one source. This probably
4898 doesn't matter, because that source is also the dest. */
4899 /* ??? Multiple writes to predicate registers are allowed
4900 if they are all AND type compares, or if they are all OR
4901 type compares. We do not generate such instructions
4902 currently. */
4904 /* ... fall through ... */
4906 default:
4907 if (GET_RTX_CLASS (GET_CODE (src)) == '<'
4908 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
4909 /* Set pflags->is_fp to 1 so that we know we're dealing
4910 with a floating point comparison when processing the
4911 destination of the SET. */
4912 pflags->is_fp = 1;
4914 /* Discover if this is a parallel comparison. We only handle
4915 and.orcm and or.andcm at present, since we must retain a
4916 strict inverse on the predicate pair. */
4917 else if (GET_CODE (src) == AND)
4918 pflags->is_and = 1;
4919 else if (GET_CODE (src) == IOR)
4920 pflags->is_or = 1;
4922 break;
4926 /* Subroutine of rtx_needs_barrier; this function determines whether the
4927 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
4928 are as in rtx_needs_barrier. COND is an rtx that holds the condition
4929 for this insn. */
4931 static int
4932 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
4934 int need_barrier = 0;
4935 rtx dst;
4936 rtx src = SET_SRC (x);
4938 if (GET_CODE (src) == CALL)
4939 /* We don't need to worry about the result registers that
4940 get written by subroutine call. */
4941 return rtx_needs_barrier (src, flags, pred);
4942 else if (SET_DEST (x) == pc_rtx)
4944 /* X is a conditional branch. */
4945 /* ??? This seems redundant, as the caller sets this bit for
4946 all JUMP_INSNs. */
4947 flags.is_branch = 1;
4948 return rtx_needs_barrier (src, flags, pred);
4951 need_barrier = rtx_needs_barrier (src, flags, pred);
4953 /* This instruction unconditionally uses a predicate register. */
4954 if (cond)
4955 need_barrier |= rws_access_reg (cond, flags, 0);
4957 dst = SET_DEST (x);
4958 if (GET_CODE (dst) == ZERO_EXTRACT)
4960 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
4961 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
4962 dst = XEXP (dst, 0);
4964 return need_barrier;
4967 /* Handle an access to rtx X of type FLAGS using predicate register
4968 PRED. Return 1 if this access creates a dependency with an earlier
4969 instruction in the same group. */
4971 static int
4972 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
4974 int i, j;
4975 int is_complemented = 0;
4976 int need_barrier = 0;
4977 const char *format_ptr;
4978 struct reg_flags new_flags;
4979 rtx cond = 0;
4981 if (! x)
4982 return 0;
4984 new_flags = flags;
4986 switch (GET_CODE (x))
4988 case SET:
4989 update_set_flags (x, &new_flags, &pred, &cond);
4990 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
4991 if (GET_CODE (SET_SRC (x)) != CALL)
4993 new_flags.is_write = 1;
4994 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
4996 break;
4998 case CALL:
4999 new_flags.is_write = 0;
5000 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5002 /* Avoid multiple register writes, in case this is a pattern with
5003 multiple CALL rtx. This avoids an abort in rws_access_reg. */
5004 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5006 new_flags.is_write = 1;
5007 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5008 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5009 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5011 break;
5013 case COND_EXEC:
5014 /* X is a predicated instruction. */
5016 cond = COND_EXEC_TEST (x);
5017 if (pred)
5018 abort ();
5019 need_barrier = rtx_needs_barrier (cond, flags, 0);
5021 if (GET_CODE (cond) == EQ)
5022 is_complemented = 1;
5023 cond = XEXP (cond, 0);
5024 if (GET_CODE (cond) != REG
5025 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5026 abort ();
5027 pred = REGNO (cond);
5028 if (is_complemented)
5029 ++pred;
5031 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5032 return need_barrier;
5034 case CLOBBER:
5035 case USE:
5036 /* Clobber & use are for earlier compiler-phases only. */
5037 break;
5039 case ASM_OPERANDS:
5040 case ASM_INPUT:
5041 /* We always emit stop bits for traditional asms. We emit stop bits
5042 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5043 if (GET_CODE (x) != ASM_OPERANDS
5044 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5046 /* Avoid writing the register multiple times if we have multiple
5047 asm outputs. This avoids an abort in rws_access_reg. */
5048 if (! rws_insn[REG_VOLATILE].write_count)
5050 new_flags.is_write = 1;
5051 rws_access_regno (REG_VOLATILE, new_flags, pred);
5053 return 1;
5056 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5057 We cannot just fall through here since then we would be confused
5058 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
5059 usage, does not indicate a traditional asm. */
5061 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5062 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5063 need_barrier = 1;
5064 break;
5066 case PARALLEL:
5067 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5069 rtx pat = XVECEXP (x, 0, i);
5070 if (GET_CODE (pat) == SET)
5072 update_set_flags (pat, &new_flags, &pred, &cond);
5073 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
5075 else if (GET_CODE (pat) == USE
5076 || GET_CODE (pat) == CALL
5077 || GET_CODE (pat) == ASM_OPERANDS)
5078 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5079 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
5080 abort ();
5082 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5084 rtx pat = XVECEXP (x, 0, i);
5085 if (GET_CODE (pat) == SET)
5087 if (GET_CODE (SET_SRC (pat)) != CALL)
5089 new_flags.is_write = 1;
5090 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5091 pred);
5094 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5095 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5097 break;
5099 case SUBREG:
5100 x = SUBREG_REG (x);
5101 /* FALLTHRU */
5102 case REG:
5103 if (REGNO (x) == AR_UNAT_REGNUM)
5105 for (i = 0; i < 64; ++i)
5106 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5108 else
5109 need_barrier = rws_access_reg (x, flags, pred);
5110 break;
5112 case MEM:
5113 /* Find the regs used in memory address computation. */
5114 new_flags.is_write = 0;
5115 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5116 break;
5118 case CONST_INT: case CONST_DOUBLE:
5119 case SYMBOL_REF: case LABEL_REF: case CONST:
5120 break;
5122 /* Operators with side-effects. */
5123 case POST_INC: case POST_DEC:
5124 if (GET_CODE (XEXP (x, 0)) != REG)
5125 abort ();
5127 new_flags.is_write = 0;
5128 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5129 new_flags.is_write = 1;
5130 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5131 break;
5133 case POST_MODIFY:
5134 if (GET_CODE (XEXP (x, 0)) != REG)
5135 abort ();
5137 new_flags.is_write = 0;
5138 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5139 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5140 new_flags.is_write = 1;
5141 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5142 break;
5144 /* Handle common unary and binary ops for efficiency. */
5145 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5146 case MOD: case UDIV: case UMOD: case AND: case IOR:
5147 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5148 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5149 case NE: case EQ: case GE: case GT: case LE:
5150 case LT: case GEU: case GTU: case LEU: case LTU:
5151 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5152 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5153 break;
5155 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5156 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5157 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5158 case SQRT: case FFS: case POPCOUNT:
5159 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5160 break;
5162 case UNSPEC:
5163 switch (XINT (x, 1))
5165 case UNSPEC_LTOFF_DTPMOD:
5166 case UNSPEC_LTOFF_DTPREL:
5167 case UNSPEC_DTPREL:
5168 case UNSPEC_LTOFF_TPREL:
5169 case UNSPEC_TPREL:
5170 case UNSPEC_PRED_REL_MUTEX:
5171 case UNSPEC_PIC_CALL:
5172 case UNSPEC_MF:
5173 case UNSPEC_FETCHADD_ACQ:
5174 case UNSPEC_BSP_VALUE:
5175 case UNSPEC_FLUSHRS:
5176 case UNSPEC_BUNDLE_SELECTOR:
5177 break;
5179 case UNSPEC_GR_SPILL:
5180 case UNSPEC_GR_RESTORE:
5182 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5183 HOST_WIDE_INT bit = (offset >> 3) & 63;
5185 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5186 new_flags.is_write = (XINT (x, 1) == 1);
5187 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5188 new_flags, pred);
5189 break;
5192 case UNSPEC_FR_SPILL:
5193 case UNSPEC_FR_RESTORE:
5194 case UNSPEC_GETF_EXP:
5195 case UNSPEC_SETF_EXP:
5196 case UNSPEC_ADDP4:
5197 case UNSPEC_FR_SQRT_RECIP_APPROX:
5198 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5199 break;
5201 case UNSPEC_FR_RECIP_APPROX:
5202 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5203 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5204 break;
5206 case UNSPEC_CMPXCHG_ACQ:
5207 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5208 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5209 break;
5211 default:
5212 abort ();
5214 break;
5216 case UNSPEC_VOLATILE:
5217 switch (XINT (x, 1))
5219 case UNSPECV_ALLOC:
5220 /* Alloc must always be the first instruction of a group.
5221 We force this by always returning true. */
5222 /* ??? We might get better scheduling if we explicitly check for
5223 input/local/output register dependencies, and modify the
5224 scheduler so that alloc is always reordered to the start of
5225 the current group. We could then eliminate all of the
5226 first_instruction code. */
5227 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5229 new_flags.is_write = 1;
5230 rws_access_regno (REG_AR_CFM, new_flags, pred);
5231 return 1;
5233 case UNSPECV_SET_BSP:
5234 need_barrier = 1;
5235 break;
5237 case UNSPECV_BLOCKAGE:
5238 case UNSPECV_INSN_GROUP_BARRIER:
5239 case UNSPECV_BREAK:
5240 case UNSPECV_PSAC_ALL:
5241 case UNSPECV_PSAC_NORMAL:
5242 return 0;
5244 default:
5245 abort ();
5247 break;
5249 case RETURN:
5250 new_flags.is_write = 0;
5251 need_barrier = rws_access_regno (REG_RP, flags, pred);
5252 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5254 new_flags.is_write = 1;
5255 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5256 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5257 break;
5259 default:
5260 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5261 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5262 switch (format_ptr[i])
5264 case '0': /* unused field */
5265 case 'i': /* integer */
5266 case 'n': /* note */
5267 case 'w': /* wide integer */
5268 case 's': /* pointer to string */
5269 case 'S': /* optional pointer to string */
5270 break;
5272 case 'e':
5273 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5274 need_barrier = 1;
5275 break;
5277 case 'E':
5278 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5279 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5280 need_barrier = 1;
5281 break;
5283 default:
5284 abort ();
5286 break;
5288 return need_barrier;
5291 /* Clear out the state for group_barrier_needed_p at the start of a
5292 sequence of insns. */
5294 static void
5295 init_insn_group_barriers (void)
5297 memset (rws_sum, 0, sizeof (rws_sum));
5298 first_instruction = 1;
5301 /* Given the current state, recorded by previous calls to this function,
5302 determine whether a group barrier (a stop bit) is necessary before INSN.
5303 Return nonzero if so. */
5305 static int
5306 group_barrier_needed_p (rtx insn)
5308 rtx pat;
5309 int need_barrier = 0;
5310 struct reg_flags flags;
5312 memset (&flags, 0, sizeof (flags));
5313 switch (GET_CODE (insn))
5315 case NOTE:
5316 break;
5318 case BARRIER:
5319 /* A barrier doesn't imply an instruction group boundary. */
5320 break;
5322 case CODE_LABEL:
5323 memset (rws_insn, 0, sizeof (rws_insn));
5324 return 1;
5326 case CALL_INSN:
5327 flags.is_branch = 1;
5328 flags.is_sibcall = SIBLING_CALL_P (insn);
5329 memset (rws_insn, 0, sizeof (rws_insn));
5331 /* Don't bundle a call following another call. */
5332 if ((pat = prev_active_insn (insn))
5333 && GET_CODE (pat) == CALL_INSN)
5335 need_barrier = 1;
5336 break;
5339 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5340 break;
5342 case JUMP_INSN:
5343 flags.is_branch = 1;
5345 /* Don't bundle a jump following a call. */
5346 if ((pat = prev_active_insn (insn))
5347 && GET_CODE (pat) == CALL_INSN)
5349 need_barrier = 1;
5350 break;
5352 /* FALLTHRU */
5354 case INSN:
5355 if (GET_CODE (PATTERN (insn)) == USE
5356 || GET_CODE (PATTERN (insn)) == CLOBBER)
5357 /* Don't care about USE and CLOBBER "insns"---those are used to
5358 indicate to the optimizer that it shouldn't get rid of
5359 certain operations. */
5360 break;
5362 pat = PATTERN (insn);
5364 /* Ug. Hack hacks hacked elsewhere. */
5365 switch (recog_memoized (insn))
5367 /* We play dependency tricks with the epilogue in order
5368 to get proper schedules. Undo this for dv analysis. */
5369 case CODE_FOR_epilogue_deallocate_stack:
5370 case CODE_FOR_prologue_allocate_stack:
5371 pat = XVECEXP (pat, 0, 0);
5372 break;
5374 /* The pattern we use for br.cloop confuses the code above.
5375 The second element of the vector is representative. */
5376 case CODE_FOR_doloop_end_internal:
5377 pat = XVECEXP (pat, 0, 1);
5378 break;
5380 /* Doesn't generate code. */
5381 case CODE_FOR_pred_rel_mutex:
5382 case CODE_FOR_prologue_use:
5383 return 0;
5385 default:
5386 break;
5389 memset (rws_insn, 0, sizeof (rws_insn));
5390 need_barrier = rtx_needs_barrier (pat, flags, 0);
5392 /* Check to see if the previous instruction was a volatile
5393 asm. */
5394 if (! need_barrier)
5395 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5396 break;
5398 default:
5399 abort ();
5402 if (first_instruction && INSN_P (insn)
5403 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5404 && GET_CODE (PATTERN (insn)) != USE
5405 && GET_CODE (PATTERN (insn)) != CLOBBER)
5407 need_barrier = 0;
5408 first_instruction = 0;
5411 return need_barrier;
5414 /* Like group_barrier_needed_p, but do not clobber the current state. */
5416 static int
5417 safe_group_barrier_needed_p (rtx insn)
5419 struct reg_write_state rws_saved[NUM_REGS];
5420 int saved_first_instruction;
5421 int t;
5423 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5424 saved_first_instruction = first_instruction;
5426 t = group_barrier_needed_p (insn);
5428 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5429 first_instruction = saved_first_instruction;
5431 return t;
5434 /* Scan the current function and insert stop bits as necessary to
5435 eliminate dependencies. This function assumes that a final
5436 instruction scheduling pass has been run which has already
5437 inserted most of the necessary stop bits. This function only
5438 inserts new ones at basic block boundaries, since these are
5439 invisible to the scheduler. */
5441 static void
5442 emit_insn_group_barriers (FILE *dump)
5444 rtx insn;
5445 rtx last_label = 0;
5446 int insns_since_last_label = 0;
5448 init_insn_group_barriers ();
5450 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5452 if (GET_CODE (insn) == CODE_LABEL)
5454 if (insns_since_last_label)
5455 last_label = insn;
5456 insns_since_last_label = 0;
5458 else if (GET_CODE (insn) == NOTE
5459 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5461 if (insns_since_last_label)
5462 last_label = insn;
5463 insns_since_last_label = 0;
5465 else if (GET_CODE (insn) == INSN
5466 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5467 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5469 init_insn_group_barriers ();
5470 last_label = 0;
5472 else if (INSN_P (insn))
5474 insns_since_last_label = 1;
5476 if (group_barrier_needed_p (insn))
5478 if (last_label)
5480 if (dump)
5481 fprintf (dump, "Emitting stop before label %d\n",
5482 INSN_UID (last_label));
5483 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5484 insn = last_label;
5486 init_insn_group_barriers ();
5487 last_label = 0;
5494 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5495 This function has to emit all necessary group barriers. */
5497 static void
5498 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5500 rtx insn;
5502 init_insn_group_barriers ();
5504 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5506 if (GET_CODE (insn) == BARRIER)
5508 rtx last = prev_active_insn (insn);
5510 if (! last)
5511 continue;
5512 if (GET_CODE (last) == JUMP_INSN
5513 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5514 last = prev_active_insn (last);
5515 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5516 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5518 init_insn_group_barriers ();
5520 else if (INSN_P (insn))
5522 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5523 init_insn_group_barriers ();
5524 else if (group_barrier_needed_p (insn))
5526 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5527 init_insn_group_barriers ();
5528 group_barrier_needed_p (insn);
5535 static int errata_find_address_regs (rtx *, void *);
5536 static void errata_emit_nops (rtx);
5537 static void fixup_errata (void);
5539 /* This structure is used to track some details about the previous insn
5540 groups so we can determine if it may be necessary to insert NOPs to
5541 work around hardware errata. */
5542 static struct group
5544 HARD_REG_SET p_reg_set;
5545 HARD_REG_SET gr_reg_conditionally_set;
5546 } last_group[2];
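/* The sequence being guarded against looks roughly like this sketch:

     fcmp.eq p6, p7 = f8, f9    // group 1: F-unit insn writes a predicate
     ;;
     (p6) mov r14 = r20         // group 2: GR conditionally set under it
     ;;
     ld8 r8 = [r14]             // group 3: that GR used as an address

   When the third step is detected, errata_emit_nops inserts a stop bit, a
   nop and another stop bit in front of the offending memory access.  */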
5548 /* Index into the last_group array. */
5549 static int group_idx;
5551 /* Called through for_each_rtx; determines if a hard register that was
5552 conditionally set in the previous group is used as an address register.
5553 It ensures that for_each_rtx returns 1 in that case. */
5554 static int
5555 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5557 rtx x = *xp;
5558 if (GET_CODE (x) != MEM)
5559 return 0;
5560 x = XEXP (x, 0);
5561 if (GET_CODE (x) == POST_MODIFY)
5562 x = XEXP (x, 0);
5563 if (GET_CODE (x) == REG)
5565 struct group *prev_group = last_group + (group_idx ^ 1);
5566 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5567 REGNO (x)))
5568 return 1;
5569 return -1;
5571 return 0;
5574 /* Called for each insn; this function keeps track of the state in
5575 last_group and emits additional NOPs if necessary to work around
5576 an Itanium A/B step erratum. */
5577 static void
5578 errata_emit_nops (rtx insn)
5580 struct group *this_group = last_group + group_idx;
5581 struct group *prev_group = last_group + (group_idx ^ 1);
5582 rtx pat = PATTERN (insn);
5583 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5584 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5585 enum attr_type type;
5586 rtx set = real_pat;
5588 if (GET_CODE (real_pat) == USE
5589 || GET_CODE (real_pat) == CLOBBER
5590 || GET_CODE (real_pat) == ASM_INPUT
5591 || GET_CODE (real_pat) == ADDR_VEC
5592 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5593 || asm_noperands (PATTERN (insn)) >= 0)
5594 return;
5596 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5597 parts of it. */
5599 if (GET_CODE (set) == PARALLEL)
5601 int i;
5602 set = XVECEXP (real_pat, 0, 0);
5603 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5604 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5605 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5607 set = 0;
5608 break;
5612 if (set && GET_CODE (set) != SET)
5613 set = 0;
5615 type = get_attr_type (insn);
5617 if (type == TYPE_F
5618 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5619 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5621 if ((type == TYPE_M || type == TYPE_A) && cond && set
5622 && REG_P (SET_DEST (set))
5623 && GET_CODE (SET_SRC (set)) != PLUS
5624 && GET_CODE (SET_SRC (set)) != MINUS
5625 && (GET_CODE (SET_SRC (set)) != ASHIFT
5626 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5627 && (GET_CODE (SET_SRC (set)) != MEM
5628 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5629 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5631 if (GET_RTX_CLASS (GET_CODE (cond)) != '<'
5632 || ! REG_P (XEXP (cond, 0)))
5633 abort ();
5635 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5636 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5638 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5640 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5641 emit_insn_before (gen_nop (), insn);
5642 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5643 group_idx = 0;
5644 memset (last_group, 0, sizeof last_group);
5648 /* Emit extra nops if they are required to work around hardware errata. */
5650 static void
5651 fixup_errata (void)
5653 rtx insn;
5655 if (! TARGET_B_STEP)
5656 return;
5658 group_idx = 0;
5659 memset (last_group, 0, sizeof last_group);
5661 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5663 if (!INSN_P (insn))
5664 continue;
5666 if (ia64_safe_type (insn) == TYPE_S)
5668 group_idx ^= 1;
5669 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5671 else
5672 errata_emit_nops (insn);
5677 /* Instruction scheduling support. */
5679 #define NR_BUNDLES 10
5681 /* A list of names of all available bundles. */
5683 static const char *bundle_name [NR_BUNDLES] =
5685 ".mii",
5686 ".mmi",
5687 ".mfi",
5688 ".mmf",
5689 #if NR_BUNDLES == 10
5690 ".bbb",
5691 ".mbb",
5692 #endif
5693 ".mib",
5694 ".mmb",
5695 ".mfb",
5696 ".mlx"
5699 /* Nonzero if we should insert stop bits into the schedule. */
5701 int ia64_final_schedule = 0;
5703 /* Codes of the corresponding queried units: */
5705 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5706 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5708 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5709 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5711 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5713 /* The following variable value is an insn group barrier. */
5715 static rtx dfa_stop_insn;
5717 /* The following variable value is the last issued insn. */
5719 static rtx last_scheduled_insn;
5721 /* The following variable value is the size of the DFA state. */
5723 static size_t dfa_state_size;
5725 /* The following variable value is a pointer to a DFA state used as
5726 a temporary variable. */
5728 static state_t temp_dfa_state = NULL;
5730 /* The following variable value is the DFA state after issuing the last
5731 insn. */
5733 static state_t prev_cycle_state = NULL;
5735 /* The following array element values are TRUE if the corresponding
5736 insn requires stop bits to be added before it. */
5738 static char *stops_p;
5740 /* The following variable is used to set up the array mentioned above. */
5742 static int stop_before_p = 0;
5744 /* The following variable value is the length of the arrays `clocks' and
5745 `add_cycles'. */
5747 static int clocks_length;
5749 /* The following array element values are cycles on which the
5750 corresponding insn will be issued. The array is used only for
5751 Itanium1. */
5753 static int *clocks;
5755 /* The following array element values are the numbers of cycles that should
5756 be added to improve insn scheduling for MM_insns on Itanium1. */
5758 static int *add_cycles;
5760 static rtx ia64_single_set (rtx);
5761 static void ia64_emit_insn_before (rtx, rtx);
5763 /* Map a bundle number to its pseudo-op. */
5765 const char *
5766 get_bundle_name (int b)
5768 return bundle_name[b];
5772 /* Return the maximum number of instructions a cpu can issue. */
5774 static int
5775 ia64_issue_rate (void)
5777 return 6;
5780 /* Helper function - like single_set, but look inside COND_EXEC. */
5782 static rtx
5783 ia64_single_set (rtx insn)
5785 rtx x = PATTERN (insn), ret;
5786 if (GET_CODE (x) == COND_EXEC)
5787 x = COND_EXEC_CODE (x);
5788 if (GET_CODE (x) == SET)
5789 return x;
5791 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
5792 Although they are not classical single set, the second set is there just
5793 to protect it from moving past FP-relative stack accesses. */
5794 switch (recog_memoized (insn))
5796 case CODE_FOR_prologue_allocate_stack:
5797 case CODE_FOR_epilogue_deallocate_stack:
5798 ret = XVECEXP (x, 0, 0);
5799 break;
5801 default:
5802 ret = single_set_2 (insn, x);
5803 break;
5806 return ret;
5809 /* Adjust the cost of a scheduling dependency. Return the new cost of
5810 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5812 static int
5813 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5815 enum attr_itanium_class dep_class;
5816 enum attr_itanium_class insn_class;
5818 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5819 return cost;
5821 insn_class = ia64_safe_itanium_class (insn);
5822 dep_class = ia64_safe_itanium_class (dep_insn);
5823 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5824 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5825 return 0;
5827 return cost;
5830 /* Like emit_insn_before, but skip cycle_display notes.
5831 ??? When cycle display notes are implemented, update this. */
5833 static void
5834 ia64_emit_insn_before (rtx insn, rtx before)
5836 emit_insn_before (insn, before);
5839 /* The following function marks insns which produce addresses for load
5840    and store insns. Such insns will be placed into M slots because that
5841    decreases latency time for Itanium1 (see function
5842    `ia64_produce_address_p' and the DFA descriptions). */
5844 static void
5845 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5847 rtx insn, link, next, next_tail;
5849 next_tail = NEXT_INSN (tail);
5850 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5851 if (INSN_P (insn))
5852 insn->call = 0;
5853 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5854 if (INSN_P (insn)
5855 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5857 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5859 next = XEXP (link, 0);
5860 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5861 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5862 && ia64_st_address_bypass_p (insn, next))
5863 break;
5864 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5865 || ia64_safe_itanium_class (next)
5866 == ITANIUM_CLASS_FLD)
5867 && ia64_ld_address_bypass_p (insn, next))
5868 break;
5870 insn->call = link != 0;
5874 /* We're beginning a new block. Initialize data structures as necessary. */
5876 static void
5877 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5878 int sched_verbose ATTRIBUTE_UNUSED,
5879 int max_ready ATTRIBUTE_UNUSED)
5881 #ifdef ENABLE_CHECKING
5882 rtx insn;
5884 if (reload_completed)
5885 for (insn = NEXT_INSN (current_sched_info->prev_head);
5886 insn != current_sched_info->next_tail;
5887 insn = NEXT_INSN (insn))
5888 if (SCHED_GROUP_P (insn))
5889 abort ();
5890 #endif
5891 last_scheduled_insn = NULL_RTX;
5892 init_insn_group_barriers ();
5895 /* We are about to begin issuing insns for this clock cycle.
5896 Override the default sort algorithm to better slot instructions. */
5898 static int
5899 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5900 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5901 int reorder_type)
5903 int n_asms;
5904 int n_ready = *pn_ready;
5905 rtx *e_ready = ready + n_ready;
5906 rtx *insnp;
5908 if (sched_verbose)
5909 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5911 if (reorder_type == 0)
5913 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5914 n_asms = 0;
5915 for (insnp = ready; insnp < e_ready; insnp++)
5916 if (insnp < e_ready)
5918 rtx insn = *insnp;
5919 enum attr_type t = ia64_safe_type (insn);
5920 if (t == TYPE_UNKNOWN)
5922 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5923 || asm_noperands (PATTERN (insn)) >= 0)
5925 rtx lowest = ready[n_asms];
5926 ready[n_asms] = insn;
5927 *insnp = lowest;
5928 n_asms++;
5930 else
5932 rtx highest = ready[n_ready - 1];
5933 ready[n_ready - 1] = insn;
5934 *insnp = highest;
5935 return 1;
5940 if (n_asms < n_ready)
5942 /* Some normal insns to process. Skip the asms. */
5943 ready += n_asms;
5944 n_ready -= n_asms;
5946 else if (n_ready > 0)
5947 return 1;
5950 if (ia64_final_schedule)
5952 int deleted = 0;
5953 int nr_need_stop = 0;
5955 for (insnp = ready; insnp < e_ready; insnp++)
5956 if (safe_group_barrier_needed_p (*insnp))
5957 nr_need_stop++;
5959 if (reorder_type == 1 && n_ready == nr_need_stop)
5960 return 0;
5961 if (reorder_type == 0)
5962 return 1;
5963 insnp = e_ready;
5964 /* Move down everything that needs a stop bit, preserving
5965 relative order. */
5966 while (insnp-- > ready + deleted)
5967 while (insnp >= ready + deleted)
5969 rtx insn = *insnp;
5970 if (! safe_group_barrier_needed_p (insn))
5971 break;
5972 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5973 *ready = insn;
5974 deleted++;
5976 n_ready -= deleted;
5977 ready += deleted;
5980 return 1;
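/* A worked example of the reordering above (a hypothetical ready list,
   not from a real schedule): with reorder_type == 0 and a ready list
   holding an asm statement, an add and a load, the asm (TYPE_UNKNOWN)
   is moved out of the way first and only the add and the load are
   left for normal slotting.  During the final schedule, every ready
   insn for which safe_group_barrier_needed_p holds is moved down
   behind the insns that can still be issued in the current group,
   preserving their relative order.  */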
5983 /* We are about to begin issuing insns for this clock cycle. Override
5984 the default sort algorithm to better slot instructions. */
5986 static int
5987 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
5988 int clock_var)
5990 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
5991 pn_ready, clock_var, 0);
5994 /* Like ia64_sched_reorder, but called after issuing each insn.
5995 Override the default sort algorithm to better slot instructions. */
5997 static int
5998 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
5999 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6000 int *pn_ready, int clock_var)
6002 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6003 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6004 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6005 clock_var, 1);
6008 /* We are about to issue INSN. Return the number of insns left on the
6009 ready queue that can be issued this cycle. */
6011 static int
6012 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6013 int sched_verbose ATTRIBUTE_UNUSED,
6014 rtx insn ATTRIBUTE_UNUSED,
6015 int can_issue_more ATTRIBUTE_UNUSED)
6017 last_scheduled_insn = insn;
6018 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6019 if (reload_completed)
6021 if (group_barrier_needed_p (insn))
6022 abort ();
6023 if (GET_CODE (insn) == CALL_INSN)
6024 init_insn_group_barriers ();
6025 stops_p [INSN_UID (insn)] = stop_before_p;
6026 stop_before_p = 0;
6028 return 1;
6031 /* We are choosing insn from the ready queue. Return nonzero if INSN
6032 can be chosen. */
6034 static int
6035 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6037 if (insn == NULL_RTX || !INSN_P (insn))
6038 abort ();
6039 return (!reload_completed
6040 || !safe_group_barrier_needed_p (insn));
6043 /* The following variable value is a pseudo-insn used by the DFA insn
6044 scheduler to change the DFA state when the simulated clock is
6045 increased. */
6047 static rtx dfa_pre_cycle_insn;
6049 /* We are about to begin issuing INSN. Return nonzero if we cannot
6050    issue it on the given cycle CLOCK and return zero if we should not sort
6051 the ready queue on the next clock start. */
6053 static int
6054 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6055 int clock, int *sort_p)
6057 int setup_clocks_p = FALSE;
6059 if (insn == NULL_RTX || !INSN_P (insn))
6060 abort ();
6061 if ((reload_completed && safe_group_barrier_needed_p (insn))
6062 || (last_scheduled_insn
6063 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6064 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6065 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6067 init_insn_group_barriers ();
6068 if (verbose && dump)
6069 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6070 last_clock == clock ? " + cycle advance" : "");
6071 stop_before_p = 1;
6072 if (last_clock == clock)
6074 state_transition (curr_state, dfa_stop_insn);
6075 if (TARGET_EARLY_STOP_BITS)
6076 *sort_p = (last_scheduled_insn == NULL_RTX
6077 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6078 else
6079 *sort_p = 0;
6080 return 1;
6082 else if (reload_completed)
6083 setup_clocks_p = TRUE;
6084 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6085 state_transition (curr_state, dfa_stop_insn);
6086 state_transition (curr_state, dfa_pre_cycle_insn);
6087 state_transition (curr_state, NULL);
6089 else if (reload_completed)
6090 setup_clocks_p = TRUE;
6091 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM)
6093 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6095 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6097 rtx link;
6098 int d = -1;
6100 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6101 if (REG_NOTE_KIND (link) == 0)
6103 enum attr_itanium_class dep_class;
6104 rtx dep_insn = XEXP (link, 0);
6106 dep_class = ia64_safe_itanium_class (dep_insn);
6107 if ((dep_class == ITANIUM_CLASS_MMMUL
6108 || dep_class == ITANIUM_CLASS_MMSHF)
6109 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6110 && (d < 0
6111 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6112 d = last_clock - clocks [INSN_UID (dep_insn)];
6114 if (d >= 0)
6115 add_cycles [INSN_UID (insn)] = 3 - d;
6118 return 0;
6123 /* The following page contains abstract data `bundle states' which are
6124 used for bundling insns (inserting nops and template generation). */
6126 /* The following describes state of insn bundling. */
6128 struct bundle_state
6130 /* Unique bundle state number to identify them in the debugging
6131 output */
6132 int unique_num;
6133 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6134   /* number of nops before and after the insn */
6135 short before_nops_num, after_nops_num;
6136 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
6137		     insn) */
6138 int cost; /* cost of the state in cycles */
6139 int accumulated_insns_num; /* number of all previous insns including
6140				  nops. An L insn is counted as 2 insns */
6141 int branch_deviation; /* deviation of previous branches from 3rd slots */
6142 struct bundle_state *next; /* next state with the same insn_num */
6143 struct bundle_state *originator; /* originator (previous insn state) */
6144 /* All bundle states are in the following chain. */
6145 struct bundle_state *allocated_states_chain;
6146 /* The DFA State after issuing the insn and the nops. */
6147 state_t dfa_state;
6150 /* The following maps an insn number to the corresponding bundle state. */
6152 static struct bundle_state **index_to_bundle_states;
6154 /* The unique number of next bundle state. */
6156 static int bundle_states_num;
6158 /* All allocated bundle states are in the following chain. */
6160 static struct bundle_state *allocated_bundle_states_chain;
6162 /* All allocated but not used bundle states are in the following
6163 chain. */
6165 static struct bundle_state *free_bundle_state_chain;
6168 /* The following function returns a free bundle state. */
6170 static struct bundle_state *
6171 get_free_bundle_state (void)
6173 struct bundle_state *result;
6175 if (free_bundle_state_chain != NULL)
6177 result = free_bundle_state_chain;
6178 free_bundle_state_chain = result->next;
6180 else
6182 result = xmalloc (sizeof (struct bundle_state));
6183 result->dfa_state = xmalloc (dfa_state_size);
6184 result->allocated_states_chain = allocated_bundle_states_chain;
6185 allocated_bundle_states_chain = result;
6187 result->unique_num = bundle_states_num++;
6188 return result;
6192 /* The following function frees the given bundle state. */
6194 static void
6195 free_bundle_state (struct bundle_state *state)
6197 state->next = free_bundle_state_chain;
6198 free_bundle_state_chain = state;
6201 /* Start work with abstract data `bundle states'. */
6203 static void
6204 initiate_bundle_states (void)
6206 bundle_states_num = 0;
6207 free_bundle_state_chain = NULL;
6208 allocated_bundle_states_chain = NULL;
6211 /* Finish work with abstract data `bundle states'. */
6213 static void
6214 finish_bundle_states (void)
6216 struct bundle_state *curr_state, *next_state;
6218 for (curr_state = allocated_bundle_states_chain;
6219 curr_state != NULL;
6220 curr_state = next_state)
6222 next_state = curr_state->allocated_states_chain;
6223 free (curr_state->dfa_state);
6224 free (curr_state);
6228 /* Hash table of the bundle states. The key is dfa_state and insn_num
6229 of the bundle states. */
6231 static htab_t bundle_state_table;
6233 /* The function returns hash of BUNDLE_STATE. */
6235 static unsigned
6236 bundle_state_hash (const void *bundle_state)
6238 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6239 unsigned result, i;
6241 for (result = i = 0; i < dfa_state_size; i++)
6242 result += (((unsigned char *) state->dfa_state) [i]
6243 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6244 return result + state->insn_num;
6247 /* The function returns nonzero if the bundle state keys are equal. */
6249 static int
6250 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6252 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6253 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6255 return (state1->insn_num == state2->insn_num
6256 && memcmp (state1->dfa_state, state2->dfa_state,
6257 dfa_state_size) == 0);
6260 /* The function inserts the BUNDLE_STATE into the hash table. The
6261 function returns nonzero if the bundle has been inserted into the
6262 table. The table contains the best bundle state with given key. */
6264 static int
6265 insert_bundle_state (struct bundle_state *bundle_state)
6267 void **entry_ptr;
6269 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6270 if (*entry_ptr == NULL)
6272 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6273 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6274 *entry_ptr = (void *) bundle_state;
6275 return TRUE;
6277 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6278 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6279 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6280 > bundle_state->accumulated_insns_num
6281 || (((struct bundle_state *)
6282 *entry_ptr)->accumulated_insns_num
6283 == bundle_state->accumulated_insns_num
6284 && ((struct bundle_state *)
6285 *entry_ptr)->branch_deviation
6286 > bundle_state->branch_deviation))))
6289 struct bundle_state temp;
6291 temp = *(struct bundle_state *) *entry_ptr;
6292 *(struct bundle_state *) *entry_ptr = *bundle_state;
6293 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6294 *bundle_state = temp;
6296 return FALSE;
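/* A small worked example of the replacement rule above (hypothetical
   numbers): if the table already holds a state with cost 3 and 9
   accumulated insns for some (dfa_state, insn_num) key, a new state
   with the same key and cost 2 replaces it; with equal costs, the
   state with fewer accumulated insns (i.e. fewer inserted nops) wins,
   and with those equal too, the smaller branch_deviation wins.  The
   displaced contents are swapped into *BUNDLE_STATE so that the
   caller can free them.  */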
6299 /* Start work with the hash table. */
6301 static void
6302 initiate_bundle_state_table (void)
6304 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6305 (htab_del) 0);
6308 /* Finish work with the hash table. */
6310 static void
6311 finish_bundle_state_table (void)
6313 htab_delete (bundle_state_table);
6318 /* The following variable is an insn `nop' used to check bundle states
6319    with different numbers of inserted nops. */
6321 static rtx ia64_nop;
6323 /* The following function tries to issue NOPS_NUM nops for the current
6324    state without advancing the processor cycle. If this fails, the
6325 function returns FALSE and frees the current state. */
6327 static int
6328 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6330 int i;
6332 for (i = 0; i < nops_num; i++)
6333 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6335 free_bundle_state (curr_state);
6336 return FALSE;
6338 return TRUE;
6341 /* The following function tries to issue INSN for the current
6342    state without advancing the processor cycle. If this fails, the
6343 function returns FALSE and frees the current state. */
6345 static int
6346 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6348 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6350 free_bundle_state (curr_state);
6351 return FALSE;
6353 return TRUE;
6356 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN,
6357    starting from ORIGINATOR, without advancing the processor cycle. If
6358    TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6359    ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6360    If this is successful, the function creates a new bundle state and
6361    inserts it into the hash table and into `index_to_bundle_states'. */
6363 static void
6364 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6365 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6367 struct bundle_state *curr_state;
6369 curr_state = get_free_bundle_state ();
6370 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6371 curr_state->insn = insn;
6372 curr_state->insn_num = originator->insn_num + 1;
6373 curr_state->cost = originator->cost;
6374 curr_state->originator = originator;
6375 curr_state->before_nops_num = before_nops_num;
6376 curr_state->after_nops_num = 0;
6377 curr_state->accumulated_insns_num
6378 = originator->accumulated_insns_num + before_nops_num;
6379 curr_state->branch_deviation = originator->branch_deviation;
6380 if (insn == NULL_RTX)
6381 abort ();
6382 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6384 if (GET_MODE (insn) == TImode)
6385 abort ();
6386 if (!try_issue_nops (curr_state, before_nops_num))
6387 return;
6388 if (!try_issue_insn (curr_state, insn))
6389 return;
6390 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6391 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6392 && curr_state->accumulated_insns_num % 3 != 0)
6394 free_bundle_state (curr_state);
6395 return;
6398 else if (GET_MODE (insn) != TImode)
6400 if (!try_issue_nops (curr_state, before_nops_num))
6401 return;
6402 if (!try_issue_insn (curr_state, insn))
6403 return;
6404 curr_state->accumulated_insns_num++;
6405 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6406 || asm_noperands (PATTERN (insn)) >= 0)
6407 abort ();
6408 if (ia64_safe_type (insn) == TYPE_L)
6409 curr_state->accumulated_insns_num++;
6411 else
6413 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6414 state_transition (curr_state->dfa_state, NULL);
6415 curr_state->cost++;
6416 if (!try_issue_nops (curr_state, before_nops_num))
6417 return;
6418 if (!try_issue_insn (curr_state, insn))
6419 return;
6420 curr_state->accumulated_insns_num++;
6421 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6422 || asm_noperands (PATTERN (insn)) >= 0)
6424 /* Finish bundle containing asm insn. */
6425 curr_state->after_nops_num
6426 = 3 - curr_state->accumulated_insns_num % 3;
6427 curr_state->accumulated_insns_num
6428 += 3 - curr_state->accumulated_insns_num % 3;
6430 else if (ia64_safe_type (insn) == TYPE_L)
6431 curr_state->accumulated_insns_num++;
6433 if (ia64_safe_type (insn) == TYPE_B)
6434 curr_state->branch_deviation
6435 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6436 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6438 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6440 state_t dfa_state;
6441 struct bundle_state *curr_state1;
6442 struct bundle_state *allocated_states_chain;
6444 curr_state1 = get_free_bundle_state ();
6445 dfa_state = curr_state1->dfa_state;
6446 allocated_states_chain = curr_state1->allocated_states_chain;
6447 *curr_state1 = *curr_state;
6448 curr_state1->dfa_state = dfa_state;
6449 curr_state1->allocated_states_chain = allocated_states_chain;
6450 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6451 dfa_state_size);
6452 curr_state = curr_state1;
6454 if (!try_issue_nops (curr_state,
6455 3 - curr_state->accumulated_insns_num % 3))
6456 return;
6457 curr_state->after_nops_num
6458 = 3 - curr_state->accumulated_insns_num % 3;
6459 curr_state->accumulated_insns_num
6460 += 3 - curr_state->accumulated_insns_num % 3;
6462 if (!insert_bundle_state (curr_state))
6463 free_bundle_state (curr_state);
6464 return;
6467 /* The following function returns the position in the two-bundle window
6468    for the given STATE. */
6470 static int
6471 get_max_pos (state_t state)
6473 if (cpu_unit_reservation_p (state, pos_6))
6474 return 6;
6475 else if (cpu_unit_reservation_p (state, pos_5))
6476 return 5;
6477 else if (cpu_unit_reservation_p (state, pos_4))
6478 return 4;
6479 else if (cpu_unit_reservation_p (state, pos_3))
6480 return 3;
6481 else if (cpu_unit_reservation_p (state, pos_2))
6482 return 2;
6483 else if (cpu_unit_reservation_p (state, pos_1))
6484 return 1;
6485 else
6486 return 0;
6489 /* The function returns the code of a possible template for the given
6490    position and state. The function should be called only with position
6491    values of 3 or 6. */
6493 static int
6494 get_template (state_t state, int pos)
6496 switch (pos)
6498 case 3:
6499 if (cpu_unit_reservation_p (state, _0mii_))
6500 return 0;
6501 else if (cpu_unit_reservation_p (state, _0mmi_))
6502 return 1;
6503 else if (cpu_unit_reservation_p (state, _0mfi_))
6504 return 2;
6505 else if (cpu_unit_reservation_p (state, _0mmf_))
6506 return 3;
6507 else if (cpu_unit_reservation_p (state, _0bbb_))
6508 return 4;
6509 else if (cpu_unit_reservation_p (state, _0mbb_))
6510 return 5;
6511 else if (cpu_unit_reservation_p (state, _0mib_))
6512 return 6;
6513 else if (cpu_unit_reservation_p (state, _0mmb_))
6514 return 7;
6515 else if (cpu_unit_reservation_p (state, _0mfb_))
6516 return 8;
6517 else if (cpu_unit_reservation_p (state, _0mlx_))
6518 return 9;
6519 else
6520 abort ();
6521 case 6:
6522 if (cpu_unit_reservation_p (state, _1mii_))
6523 return 0;
6524 else if (cpu_unit_reservation_p (state, _1mmi_))
6525 return 1;
6526 else if (cpu_unit_reservation_p (state, _1mfi_))
6527 return 2;
6528 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6529 return 3;
6530 else if (cpu_unit_reservation_p (state, _1bbb_))
6531 return 4;
6532 else if (cpu_unit_reservation_p (state, _1mbb_))
6533 return 5;
6534 else if (cpu_unit_reservation_p (state, _1mib_))
6535 return 6;
6536 else if (cpu_unit_reservation_p (state, _1mmb_))
6537 return 7;
6538 else if (cpu_unit_reservation_p (state, _1mfb_))
6539 return 8;
6540 else if (cpu_unit_reservation_p (state, _1mlx_))
6541 return 9;
6542 else
6543 abort ();
6544 default:
6545 abort ();
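/* The codes returned above follow the same order as bundle_name[]:
   0 -> ".mii", 1 -> ".mmi", 2 -> ".mfi", 3 -> ".mmf", 4 -> ".bbb",
   5 -> ".mbb", 6 -> ".mib", 7 -> ".mmb", 8 -> ".mfb", 9 -> ".mlx".
   For example, the bundling code below rewrites a template code of 9
   (MLX) into 2 (MFI) before inserting nops, and get_bundle_name ()
   maps a code back to its pseudo-op.  */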
6549 /* The following function returns the first insn important for insn
6550    bundling, starting at INSN and before TAIL. */
6552 static rtx
6553 get_next_important_insn (rtx insn, rtx tail)
6555 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6556 if (INSN_P (insn)
6557 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6558 && GET_CODE (PATTERN (insn)) != USE
6559 && GET_CODE (PATTERN (insn)) != CLOBBER)
6560 return insn;
6561 return NULL_RTX;
6564 /* The following function does insn bundling. Bundling means
6565 inserting templates and nop insns to fit insn groups into permitted
6566    templates. Instruction scheduling uses an NDFA (non-deterministic
6567    finite automaton) encoding information about the templates and the
6568    inserted nops. Nondeterminism of the automaton permits following
6569    all possible insn sequences very quickly.
6571 Unfortunately it is not possible to get information about inserting
6572    nop insns and used templates from the automaton states. The
6573    automaton only says that we can issue an insn, possibly inserting
6574 some nops before it and using some template. Therefore insn
6575 bundling in this function is implemented by using DFA
6576    (deterministic finite automaton). We follow all possible insn
6577    sequences by inserting 0-2 nops (that is what the NDFA describes for
6578 insn scheduling) before/after each insn being bundled. We know the
6579 start of simulated processor cycle from insn scheduling (insn
6580 starting a new cycle has TImode).
6582    A simple implementation of insn bundling would create an enormous
6583    number of possible insn sequences satisfying the information about new
6584 cycle ticks taken from the insn scheduling. To make the algorithm
6585 practical we use dynamic programming. Each decision (about
6586 inserting nops and implicitly about previous decisions) is described
6587    by a structure bundle_state (see above). If we generate the same
6588 bundle state (key is automaton state after issuing the insns and
6589    nops for it), we reuse the already generated one. As a consequence we
6590    reject some decisions which cannot improve the solution and
6591    reduce the memory needed by the algorithm.
6593 When we reach the end of EBB (extended basic block), we choose the
6594 best sequence and then, moving back in EBB, insert templates for
6595    the best alternative. The templates are found by querying the
6596    automaton state for each insn in the chosen bundle states.
6598 So the algorithm makes two (forward and backward) passes through
6599    the EBB. There is an additional forward pass through the EBB for the
6600    Itanium1 processor. This pass inserts more nops to make the dependency
6601    between a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
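/* To make the dynamic programming concrete, a hypothetical step (not
   traced from a real compilation): starting from a bundle state whose
   accumulated_insns_num is 4 (slot 1 of the current bundle),
   issue_nops_and_insn is tried with 0, 1 and 2 leading nops and,
   where allowed, with trailing nops that close the bundle.  Each
   successful try produces a candidate state keyed by its DFA state
   and insn number; insert_bundle_state keeps only the best state per
   key (by cost, then inserted nops, then branch deviation), so the
   set of live states per insn stays small.  */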
6603 static void
6604 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6606 struct bundle_state *curr_state, *next_state, *best_state;
6607 rtx insn, next_insn;
6608 int insn_num;
6609 int i, bundle_end_p, only_bundle_end_p, asm_p;
6610 int pos = 0, max_pos, template0, template1;
6611 rtx b;
6612 rtx nop;
6613 enum attr_type type;
6615 insn_num = 0;
6616 /* Count insns in the EBB. */
6617 for (insn = NEXT_INSN (prev_head_insn);
6618 insn && insn != tail;
6619 insn = NEXT_INSN (insn))
6620 if (INSN_P (insn))
6621 insn_num++;
6622 if (insn_num == 0)
6623 return;
6624 bundling_p = 1;
6625 dfa_clean_insn_cache ();
6626 initiate_bundle_state_table ();
6627 index_to_bundle_states = xmalloc ((insn_num + 2)
6628 * sizeof (struct bundle_state *));
6629 /* First (forward) pass -- generation of bundle states. */
6630 curr_state = get_free_bundle_state ();
6631 curr_state->insn = NULL;
6632 curr_state->before_nops_num = 0;
6633 curr_state->after_nops_num = 0;
6634 curr_state->insn_num = 0;
6635 curr_state->cost = 0;
6636 curr_state->accumulated_insns_num = 0;
6637 curr_state->branch_deviation = 0;
6638 curr_state->next = NULL;
6639 curr_state->originator = NULL;
6640 state_reset (curr_state->dfa_state);
6641 index_to_bundle_states [0] = curr_state;
6642 insn_num = 0;
6643   /* Shift the cycle mark if it is put on an insn which could be ignored. */
6644 for (insn = NEXT_INSN (prev_head_insn);
6645 insn != tail;
6646 insn = NEXT_INSN (insn))
6647 if (INSN_P (insn)
6648 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6649 || GET_CODE (PATTERN (insn)) == USE
6650 || GET_CODE (PATTERN (insn)) == CLOBBER)
6651 && GET_MODE (insn) == TImode)
6653 PUT_MODE (insn, VOIDmode);
6654 for (next_insn = NEXT_INSN (insn);
6655 next_insn != tail;
6656 next_insn = NEXT_INSN (next_insn))
6657 if (INSN_P (next_insn)
6658 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6659 && GET_CODE (PATTERN (next_insn)) != USE
6660 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6662 PUT_MODE (next_insn, TImode);
6663 break;
6666   /* Forward pass: generation of bundle states. */
6667 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6668 insn != NULL_RTX;
6669 insn = next_insn)
6671 if (!INSN_P (insn)
6672 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6673 || GET_CODE (PATTERN (insn)) == USE
6674 || GET_CODE (PATTERN (insn)) == CLOBBER)
6675 abort ();
6676 type = ia64_safe_type (insn);
6677 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6678 insn_num++;
6679 index_to_bundle_states [insn_num] = NULL;
6680 for (curr_state = index_to_bundle_states [insn_num - 1];
6681 curr_state != NULL;
6682 curr_state = next_state)
6684 pos = curr_state->accumulated_insns_num % 3;
6685 next_state = curr_state->next;
6686 /* We must fill up the current bundle in order to start a
6687	     subsequent asm insn in a new bundle. An asm insn is always
6688 placed in a separate bundle. */
6689 only_bundle_end_p
6690 = (next_insn != NULL_RTX
6691 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6692 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6693 /* We may fill up the current bundle if it is the cycle end
6694 without a group barrier. */
6695 bundle_end_p
6696 = (only_bundle_end_p || next_insn == NULL_RTX
6697 || (GET_MODE (next_insn) == TImode
6698 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6699 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6700 || type == TYPE_S
6701 /* We need to insert 2 nops for cases like M_MII. To
6702 guarantee issuing all insns on the same cycle for
6703 Itanium 1, we need to issue 2 nops after the first M
6704 insn (MnnMII where n is a nop insn). */
6705 || (type == TYPE_M && ia64_tune == PROCESSOR_ITANIUM
6706 && !bundle_end_p && pos == 1))
6707 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6708 only_bundle_end_p);
6709 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6710 only_bundle_end_p);
6711 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6712 only_bundle_end_p);
6714 if (index_to_bundle_states [insn_num] == NULL)
6715 abort ();
6716 for (curr_state = index_to_bundle_states [insn_num];
6717 curr_state != NULL;
6718 curr_state = curr_state->next)
6719 if (verbose >= 2 && dump)
6721 /* This structure is taken from generated code of the
6722 pipeline hazard recognizer (see file insn-attrtab.c).
6723 Please don't forget to change the structure if a new
6724 automaton is added to .md file. */
6725 struct DFA_chip
6727 unsigned short one_automaton_state;
6728 unsigned short oneb_automaton_state;
6729 unsigned short two_automaton_state;
6730 unsigned short twob_automaton_state;
6733 fprintf
6734 (dump,
6735 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6736 curr_state->unique_num,
6737 (curr_state->originator == NULL
6738 ? -1 : curr_state->originator->unique_num),
6739 curr_state->cost,
6740 curr_state->before_nops_num, curr_state->after_nops_num,
6741 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6742 (ia64_tune == PROCESSOR_ITANIUM
6743 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6744 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6745 INSN_UID (insn));
6748 if (index_to_bundle_states [insn_num] == NULL)
6749 /* We should find a solution because the 2nd insn scheduling has
6750 found one. */
6751 abort ();
6752 /* Find a state corresponding to the best insn sequence. */
6753 best_state = NULL;
6754 for (curr_state = index_to_bundle_states [insn_num];
6755 curr_state != NULL;
6756 curr_state = curr_state->next)
6757     /* We are only looking at states whose last bundle is fully filled
6758	up. First we prefer insn sequences with minimal cost, then with
6759	minimal inserted nops, and finally with branch insns placed in
6760	the 3rd slots. */
6761 if (curr_state->accumulated_insns_num % 3 == 0
6762 && (best_state == NULL || best_state->cost > curr_state->cost
6763 || (best_state->cost == curr_state->cost
6764 && (curr_state->accumulated_insns_num
6765 < best_state->accumulated_insns_num
6766 || (curr_state->accumulated_insns_num
6767 == best_state->accumulated_insns_num
6768 && curr_state->branch_deviation
6769 < best_state->branch_deviation)))))
6770 best_state = curr_state;
6771 /* Second (backward) pass: adding nops and templates. */
6772 insn_num = best_state->before_nops_num;
6773 template0 = template1 = -1;
6774 for (curr_state = best_state;
6775 curr_state->originator != NULL;
6776 curr_state = curr_state->originator)
6778 insn = curr_state->insn;
6779 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6780 || asm_noperands (PATTERN (insn)) >= 0);
6781 insn_num++;
6782 if (verbose >= 2 && dump)
6784 struct DFA_chip
6786 unsigned short one_automaton_state;
6787 unsigned short oneb_automaton_state;
6788 unsigned short two_automaton_state;
6789 unsigned short twob_automaton_state;
6792 fprintf
6793 (dump,
6794 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6795 curr_state->unique_num,
6796 (curr_state->originator == NULL
6797 ? -1 : curr_state->originator->unique_num),
6798 curr_state->cost,
6799 curr_state->before_nops_num, curr_state->after_nops_num,
6800 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6801 (ia64_tune == PROCESSOR_ITANIUM
6802 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6803 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6804 INSN_UID (insn));
6806 /* Find the position in the current bundle window. The window can
6807	 contain at most two bundles. A two-bundle window means that
6808	 the processor will make two bundle rotations. */
6809 max_pos = get_max_pos (curr_state->dfa_state);
6810 if (max_pos == 6
6811 /* The following (negative template number) means that the
6812 processor did one bundle rotation. */
6813 || (max_pos == 3 && template0 < 0))
6815 /* We are at the end of the window -- find template(s) for
6816 its bundle(s). */
6817 pos = max_pos;
6818 if (max_pos == 3)
6819 template0 = get_template (curr_state->dfa_state, 3);
6820 else
6822 template1 = get_template (curr_state->dfa_state, 3);
6823 template0 = get_template (curr_state->dfa_state, 6);
6826 if (max_pos > 3 && template1 < 0)
6827	/* This may happen when we have a stop inside a bundle. */
6829 if (pos > 3)
6830 abort ();
6831 template1 = get_template (curr_state->dfa_state, 3);
6832 pos += 3;
6834 if (!asm_p)
6835 /* Emit nops after the current insn. */
6836 for (i = 0; i < curr_state->after_nops_num; i++)
6838 nop = gen_nop ();
6839 emit_insn_after (nop, insn);
6840 pos--;
6841 if (pos < 0)
6842 abort ();
6843 if (pos % 3 == 0)
6845 /* We are at the start of a bundle: emit the template
6846 (it should be defined). */
6847 if (template0 < 0)
6848 abort ();
6849 b = gen_bundle_selector (GEN_INT (template0));
6850 ia64_emit_insn_before (b, nop);
6851	      /* If we have a two-bundle window, we make one bundle
6852		 rotation. Otherwise template0 will be undefined
6853		 (a negative value). */
6854 template0 = template1;
6855 template1 = -1;
6858      /* Move the position backward in the window. A group barrier has
6859	 no slot. An asm insn takes a whole bundle. */
6860 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6861 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6862 && asm_noperands (PATTERN (insn)) < 0)
6863 pos--;
6864 /* Long insn takes 2 slots. */
6865 if (ia64_safe_type (insn) == TYPE_L)
6866 pos--;
6867 if (pos < 0)
6868 abort ();
6869 if (pos % 3 == 0
6870 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6871 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6872 && asm_noperands (PATTERN (insn)) < 0)
6874 /* The current insn is at the bundle start: emit the
6875 template. */
6876 if (template0 < 0)
6877 abort ();
6878 b = gen_bundle_selector (GEN_INT (template0));
6879 ia64_emit_insn_before (b, insn);
6880 b = PREV_INSN (insn);
6881 insn = b;
6882	  /* See comment above in analogous place for emitting nops
6883 after the insn. */
6884 template0 = template1;
6885 template1 = -1;
6887      /* Emit nops before the current insn. */
6888 for (i = 0; i < curr_state->before_nops_num; i++)
6890 nop = gen_nop ();
6891 ia64_emit_insn_before (nop, insn);
6892 nop = PREV_INSN (insn);
6893 insn = nop;
6894 pos--;
6895 if (pos < 0)
6896 abort ();
6897 if (pos % 3 == 0)
6899	      /* See comment above in analogous place for emitting nops
6900 after the insn. */
6901 if (template0 < 0)
6902 abort ();
6903 b = gen_bundle_selector (GEN_INT (template0));
6904 ia64_emit_insn_before (b, insn);
6905 b = PREV_INSN (insn);
6906 insn = b;
6907 template0 = template1;
6908 template1 = -1;
6912 if (ia64_tune == PROCESSOR_ITANIUM)
6913 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
6914       Itanium1 has a strange design: if the distance between an insn
6915       and a dependent MM-insn is less than 4 cycles, we get an
6916       additional 6-cycle stall. So we make the distance equal to 4
6917       cycles if it is less. */
6918 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6919 insn != NULL_RTX;
6920 insn = next_insn)
6922 if (!INSN_P (insn)
6923 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6924 || GET_CODE (PATTERN (insn)) == USE
6925 || GET_CODE (PATTERN (insn)) == CLOBBER)
6926 abort ();
6927 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6928 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
6929 /* We found a MM-insn which needs additional cycles. */
6931 rtx last;
6932 int i, j, n;
6933 int pred_stop_p;
6935	  /* Now we are searching for the template of the bundle in
6936	     which the MM-insn is placed and for the position of the
6937	     insn in the bundle (0, 1, 2). We also check whether
6938	     there is a stop before the insn. */
6939 last = prev_active_insn (insn);
6940 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6941 if (pred_stop_p)
6942 last = prev_active_insn (last);
6943 n = 0;
6944 for (;; last = prev_active_insn (last))
6945 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6947 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
6948 if (template0 == 9)
6949		/* The insn is in an MLX bundle. Change the template
6950		   to MFI because we will add nops before the
6951		   insn. This simplifies the subsequent code a lot. */
6952 PATTERN (last)
6953 = gen_bundle_selector (GEN_INT (2)); /* -> MFI */
6954 break;
6956 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6957 n++;
6958	  /* Sanity checks: the stop is not at the bundle start,
6959	     there are no more than 3 insns in the bundle, and the
6960	     MM-insn is not at the start of a bundle with
6961	     template MLX. */
6962 if ((pred_stop_p && n == 0) || n > 2
6963 || (template0 == 9 && n != 0))
6964 abort ();
6965 /* Put nops after the insn in the bundle. */
6966 for (j = 3 - n; j > 0; j --)
6967 ia64_emit_insn_before (gen_nop (), insn);
6968	  /* This takes into account that we will add N more nops
6969	     before the insn later -- see the code below. */
6970 add_cycles [INSN_UID (insn)]--;
6971 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6972 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6973 insn);
6974 if (pred_stop_p)
6975 add_cycles [INSN_UID (insn)]--;
6976 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
6978 /* Insert "MII;" template. */
6979 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (0)),
6980 insn);
6981 ia64_emit_insn_before (gen_nop (), insn);
6982 ia64_emit_insn_before (gen_nop (), insn);
6983 if (i > 1)
6985 /* To decrease code size, we use "MI;I;"
6986 template. */
6987 ia64_emit_insn_before
6988 (gen_insn_group_barrier (GEN_INT (3)), insn);
6989 i--;
6991 ia64_emit_insn_before (gen_nop (), insn);
6992 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6993 insn);
6995 /* Put the MM-insn in the same slot of a bundle with the
6996 same template as the original one. */
6997 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
6998 insn);
6999 /* To put the insn in the same slot, add necessary number
7000 of nops. */
7001 for (j = n; j > 0; j --)
7002 ia64_emit_insn_before (gen_nop (), insn);
7003 /* Put the stop if the original bundle had it. */
7004 if (pred_stop_p)
7005 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7006 insn);
7009 free (index_to_bundle_states);
7010 finish_bundle_state_table ();
7011 bundling_p = 0;
7012 dfa_clean_insn_cache ();
7015 /* The following function is called at the end of scheduling BB or
7016 EBB. After reload, it inserts stop bits and does insn bundling. */
7018 static void
7019 ia64_sched_finish (FILE *dump, int sched_verbose)
7021 if (sched_verbose)
7022 fprintf (dump, "// Finishing schedule.\n");
7023 if (!reload_completed)
7024 return;
7025 if (reload_completed)
7027 final_emit_insn_group_barriers (dump);
7028 bundling (dump, sched_verbose, current_sched_info->prev_head,
7029 current_sched_info->next_tail);
7030 if (sched_verbose && dump)
7031 fprintf (dump, "// finishing %d-%d\n",
7032 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7033 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7035 return;
7039 /* The following function inserts stop bits in scheduled BB or EBB. */
7041 static void
7042 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7044 rtx insn;
7045 int need_barrier_p = 0;
7046 rtx prev_insn = NULL_RTX;
7048 init_insn_group_barriers ();
7050 for (insn = NEXT_INSN (current_sched_info->prev_head);
7051 insn != current_sched_info->next_tail;
7052 insn = NEXT_INSN (insn))
7054 if (GET_CODE (insn) == BARRIER)
7056 rtx last = prev_active_insn (insn);
7058 if (! last)
7059 continue;
7060 if (GET_CODE (last) == JUMP_INSN
7061 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7062 last = prev_active_insn (last);
7063 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7064 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7066 init_insn_group_barriers ();
7067 need_barrier_p = 0;
7068 prev_insn = NULL_RTX;
7070 else if (INSN_P (insn))
7072 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7074 init_insn_group_barriers ();
7075 need_barrier_p = 0;
7076 prev_insn = NULL_RTX;
7078 else if (need_barrier_p || group_barrier_needed_p (insn))
7080 if (TARGET_EARLY_STOP_BITS)
7082 rtx last;
7084 for (last = insn;
7085 last != current_sched_info->prev_head;
7086 last = PREV_INSN (last))
7087 if (INSN_P (last) && GET_MODE (last) == TImode
7088 && stops_p [INSN_UID (last)])
7089 break;
7090 if (last == current_sched_info->prev_head)
7091 last = insn;
7092 last = prev_active_insn (last);
7093 if (last
7094 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7095 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7096 last);
7097 init_insn_group_barriers ();
7098 for (last = NEXT_INSN (last);
7099 last != insn;
7100 last = NEXT_INSN (last))
7101 if (INSN_P (last))
7102 group_barrier_needed_p (last);
7104 else
7106 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7107 insn);
7108 init_insn_group_barriers ();
7110 group_barrier_needed_p (insn);
7111 prev_insn = NULL_RTX;
7113 else if (recog_memoized (insn) >= 0)
7114 prev_insn = insn;
7115 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7116 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7117 || asm_noperands (PATTERN (insn)) >= 0);
7124 /* If the following function returns TRUE, we will use the DFA
7125 insn scheduler. */
7127 static int
7128 ia64_use_dfa_pipeline_interface (void)
7130 return 1;
7133 /* The following function returns the number of insns to look ahead
7134    in the first-cycle multipass insn scheduling. */
7136 static int
7137 ia64_first_cycle_multipass_dfa_lookahead (void)
7139 return (reload_completed ? 6 : 4);
7142 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
7144 static void
7145 ia64_init_dfa_pre_cycle_insn (void)
7147 if (temp_dfa_state == NULL)
7149 dfa_state_size = state_size ();
7150 temp_dfa_state = xmalloc (dfa_state_size);
7151 prev_cycle_state = xmalloc (dfa_state_size);
7153 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7154 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7155 recog_memoized (dfa_pre_cycle_insn);
7156 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7157 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7158 recog_memoized (dfa_stop_insn);
7161 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7162 used by the DFA insn scheduler. */
7164 static rtx
7165 ia64_dfa_pre_cycle_insn (void)
7167 return dfa_pre_cycle_insn;
7170 /* The following function returns TRUE if PRODUCER (of type ilog or
7171    ld) produces an address for CONSUMER (of type st or stf). */
7174 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7176 rtx dest, reg, mem;
7178 if (producer == NULL_RTX || consumer == NULL_RTX)
7179 abort ();
7180 dest = ia64_single_set (producer);
7181 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7182 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7183 abort ();
7184 if (GET_CODE (reg) == SUBREG)
7185 reg = SUBREG_REG (reg);
7186 dest = ia64_single_set (consumer);
7187 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
7188 || GET_CODE (mem) != MEM)
7189 abort ();
7190 return reg_mentioned_p (reg, mem);
7193 /* The following function returns TRUE if PRODUCER (of type ilog or
7194    ld) produces an address for CONSUMER (of type ld or fld). */
7197 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7199 rtx dest, src, reg, mem;
7201 if (producer == NULL_RTX || consumer == NULL_RTX)
7202 abort ();
7203 dest = ia64_single_set (producer);
7204 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7205 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7206 abort ();
7207 if (GET_CODE (reg) == SUBREG)
7208 reg = SUBREG_REG (reg);
7209 src = ia64_single_set (consumer);
7210 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
7211 abort ();
7212 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7213 mem = XVECEXP (mem, 0, 0);
7214 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7215 mem = XEXP (mem, 0);
7217 /* Note that LO_SUM is used for GOT loads. */
7218 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
7219 abort ();
7221 return reg_mentioned_p (reg, mem);
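/* An illustrative producer/consumer pair for these bypass predicates
   (hand-written, not from a test case):

	add r14 = r32, r33	// producer computes the address
	ld8 r15 = [r14]		// consumer loads through it

   Here the producer's destination r14 is mentioned in the consumer's
   memory address, so ia64_ld_address_bypass_p returns TRUE; the
   analogous store case is handled by ia64_st_address_bypass_p.  Such
   producers are the insns that ia64_dependencies_evaluation_hook
   marks for placement in M slots.  */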
7224 /* The following function returns TRUE if INSN produces an address for a
7225    load/store insn. We will place such insns into an M slot because that
7226    decreases their latency time. */
7229 ia64_produce_address_p (rtx insn)
7231 return insn->call;
7235 /* Emit pseudo-ops for the assembler to describe predicate relations.
7236 At present this assumes that we only consider predicate pairs to
7237 be mutex, and that the assembler can deduce proper values from
7238 straight-line code. */
7240 static void
7241 emit_predicate_relation_info (void)
7243 basic_block bb;
7245 FOR_EACH_BB_REVERSE (bb)
7247 int r;
7248 rtx head = BB_HEAD (bb);
7250 /* We only need such notes at code labels. */
7251 if (GET_CODE (head) != CODE_LABEL)
7252 continue;
7253 if (GET_CODE (NEXT_INSN (head)) == NOTE
7254 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7255 head = NEXT_INSN (head);
7257 for (r = PR_REG (0); r < PR_REG (64); r += 2)
7258 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
7260 rtx p = gen_rtx_REG (BImode, r);
7261 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7262 if (head == BB_END (bb))
7263 BB_END (bb) = n;
7264 head = n;
7268 /* Look for conditional calls that do not return, and protect predicate
7269 relations around them. Otherwise the assembler will assume the call
7270 returns, and complain about uses of call-clobbered predicates after
7271 the call. */
7272 FOR_EACH_BB_REVERSE (bb)
7274 rtx insn = BB_HEAD (bb);
7276 while (1)
7278 if (GET_CODE (insn) == CALL_INSN
7279 && GET_CODE (PATTERN (insn)) == COND_EXEC
7280 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7282 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7283 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7284 if (BB_HEAD (bb) == insn)
7285 BB_HEAD (bb) = b;
7286 if (BB_END (bb) == insn)
7287 BB_END (bb) = a;
7290 if (insn == BB_END (bb))
7291 break;
7292 insn = NEXT_INSN (insn);
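/* A sketch of the resulting assembler annotations (the exact pseudo-op
   spelling comes from the pred_rel_mutex and safe_across_calls_*
   patterns in ia64.md, so treat this as approximate): at a code label
   whose block has the predicate pair p6/p7 live, an insn is emitted
   that prints something like

	.pred.rel.mutex p6, p7

   telling the assembler the two predicates are mutually exclusive, so
   it does not warn about the predicated straight-line code that
   follows.  Around a conditional noreturn call, the safe-across-calls
   annotations are emitted before and after the call for the same
   reason.  */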
7297 /* Perform machine dependent operations on the rtl chain INSNS. */
7299 static void
7300 ia64_reorg (void)
7302 /* We are freeing block_for_insn in the toplev to keep compatibility
7303 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7304 compute_bb_for_insn ();
7306 /* If optimizing, we'll have split before scheduling. */
7307 if (optimize == 0)
7308 split_all_insns (0);
7310 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7311 non-optimizing bootstrap. */
7312 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7314 if (ia64_flag_schedule_insns2)
7316 timevar_push (TV_SCHED2);
7317 ia64_final_schedule = 1;
7319 initiate_bundle_states ();
7320 ia64_nop = make_insn_raw (gen_nop ());
7321 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7322 recog_memoized (ia64_nop);
7323 clocks_length = get_max_uid () + 1;
7324 stops_p = xcalloc (1, clocks_length);
7325 if (ia64_tune == PROCESSOR_ITANIUM)
7327 clocks = xcalloc (clocks_length, sizeof (int));
7328 add_cycles = xcalloc (clocks_length, sizeof (int));
7330 if (ia64_tune == PROCESSOR_ITANIUM2)
7332 pos_1 = get_cpu_unit_code ("2_1");
7333 pos_2 = get_cpu_unit_code ("2_2");
7334 pos_3 = get_cpu_unit_code ("2_3");
7335 pos_4 = get_cpu_unit_code ("2_4");
7336 pos_5 = get_cpu_unit_code ("2_5");
7337 pos_6 = get_cpu_unit_code ("2_6");
7338 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7339 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7340 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7341 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7342 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7343 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7344 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7345 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7346 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7347 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7348 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7349 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7350 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7351 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7352 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7353 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7354 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7355 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7356 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7357 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7359 else
7361 pos_1 = get_cpu_unit_code ("1_1");
7362 pos_2 = get_cpu_unit_code ("1_2");
7363 pos_3 = get_cpu_unit_code ("1_3");
7364 pos_4 = get_cpu_unit_code ("1_4");
7365 pos_5 = get_cpu_unit_code ("1_5");
7366 pos_6 = get_cpu_unit_code ("1_6");
7367 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7368 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7369 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7370 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7371 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7372 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7373 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7374 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7375 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7376 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7377 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7378 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7379 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7380 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7381 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7382 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7383 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7384 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7385 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7386 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7388 schedule_ebbs (rtl_dump_file);
7389 finish_bundle_states ();
7390 if (ia64_tune == PROCESSOR_ITANIUM)
7392 free (add_cycles);
7393 free (clocks);
7395 free (stops_p);
7396 emit_insn_group_barriers (rtl_dump_file);
7398 ia64_final_schedule = 0;
7399 timevar_pop (TV_SCHED2);
7401 else
7402 emit_all_insn_group_barriers (rtl_dump_file);
7404 /* A call must not be the last instruction in a function, so that the
7405 return address is still within the function, so that unwinding works
7406 properly. Note that IA-64 differs from dwarf2 on this point. */
7407 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7409 rtx insn;
7410 int saw_stop = 0;
7412 insn = get_last_insn ();
7413 if (! INSN_P (insn))
7414 insn = prev_active_insn (insn);
7415 if (GET_CODE (insn) == INSN
7416 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7417 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7419 saw_stop = 1;
7420 insn = prev_active_insn (insn);
7422 if (GET_CODE (insn) == CALL_INSN)
7424 if (! saw_stop)
7425 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7426 emit_insn (gen_break_f ());
7427 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7431 fixup_errata ();
7432 emit_predicate_relation_info ();
7435 /* Return true if REGNO is used by the epilogue. */
7438 ia64_epilogue_uses (int regno)
7440 switch (regno)
7442 case R_GR (1):
7443 /* With a call to a function in another module, we will write a new
7444 value to "gp". After returning from such a call, we need to make
7445 sure the function restores the original gp-value, even if the
7446 function itself does not use the gp anymore. */
7447 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7449 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7450 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7451 /* For functions defined with the syscall_linkage attribute, all
7452 input registers are marked as live at all function exits. This
7453 prevents the register allocator from using the input registers,
7454 which in turn makes it possible to restart a system call after
7455 an interrupt without having to save/restore the input registers.
7456 This also prevents kernel data from leaking to application code. */
7457 return lookup_attribute ("syscall_linkage",
7458 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7460 case R_BR (0):
7461 /* Conditional return patterns can't represent the use of `b0' as
7462 the return address, so we force the value live this way. */
7463 return 1;
7465 case AR_PFS_REGNUM:
7466 /* Likewise for ar.pfs, which is used by br.ret. */
7467 return 1;
7469 default:
7470 return 0;
7474 /* Return true if REGNO is used by the frame unwinder. */
7477 ia64_eh_uses (int regno)
7479 if (! reload_completed)
7480 return 0;
7482 if (current_frame_info.reg_save_b0
7483 && regno == current_frame_info.reg_save_b0)
7484 return 1;
7485 if (current_frame_info.reg_save_pr
7486 && regno == current_frame_info.reg_save_pr)
7487 return 1;
7488 if (current_frame_info.reg_save_ar_pfs
7489 && regno == current_frame_info.reg_save_ar_pfs)
7490 return 1;
7491 if (current_frame_info.reg_save_ar_unat
7492 && regno == current_frame_info.reg_save_ar_unat)
7493 return 1;
7494 if (current_frame_info.reg_save_ar_lc
7495 && regno == current_frame_info.reg_save_ar_lc)
7496 return 1;
7498 return 0;
7501 /* Return true if this goes in small data/bss. */
7503 /* ??? We could also support our own long data here, generating movl/add/ld8
7504    instead of addl,ld8/ld8. This makes the code bigger, but should make the
7505    code faster because there is one less load. This would also cover incomplete
7506    types which can't go in sdata/sbss. */
7508 static bool
7509 ia64_in_small_data_p (tree exp)
7511 if (TARGET_NO_SDATA)
7512 return false;
7514 /* We want to merge strings, so we never consider them small data. */
7515 if (TREE_CODE (exp) == STRING_CST)
7516 return false;
7518 /* Functions are never small data. */
7519 if (TREE_CODE (exp) == FUNCTION_DECL)
7520 return false;
7522 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7524 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7525 if (strcmp (section, ".sdata") == 0
7526 || strcmp (section, ".sbss") == 0)
7527 return true;
7529 else
7531 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7533 /* If this is an incomplete type with size 0, then we can't put it
7534 in sdata because it might be too big when completed. */
7535 if (size > 0 && size <= ia64_section_threshold)
7536 return true;
7539 return false;
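/* The payoff of small data placement (an illustrative hand-written
   sequence; see also the ??? note above): an object in sdata/sbss is
   reachable gp-relatively with a single add and load,

	addl r14 = @gprel(x), gp
	;;
	ld4 r15 = [r14]

   whereas an ordinary object is reached through the linkage table,
   which costs an extra load of the object's address (the
   addl,ld8/ld8 sequence mentioned above).  */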
7542 /* Output assembly directives for prologue regions. */
7544 /* True if we are in the last basic block of the function. */
7546 static bool last_block;
7548 /* True if we need a copy_state command at the start of the next block. */
7550 static bool need_copy_state;
7552 /* The function emits unwind directives for the start of an epilogue. */
7554 static void
7555 process_epilogue (void)
7557 /* If this isn't the last block of the function, then we need to label the
7558 current state, and copy it back in at the start of the next block. */
7560 if (!last_block)
7562 fprintf (asm_out_file, "\t.label_state 1\n");
7563 need_copy_state = true;
7566 fprintf (asm_out_file, "\t.restore sp\n");
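/* Taken together, process_epilogue above and process_set below emit
   the unwind annotations for a frame.  A hypothetical prologue and
   epilogue might be annotated roughly as

	.fframe 32		// sp was decremented by 32
	.save ar.pfs, r34	// alloc stored ar.pfs in r34
	.save rp, r33		// b0 was copied to r33
	...
	.restore sp		// the epilogue restored sp

   with .label_state / .copy_state used when the epilogue is not in
   the last block of the function.  */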
7569 /* This function processes a SET pattern looking for specific patterns
7570 which result in emitting an assembly directive required for unwinding. */
7572 static int
7573 process_set (FILE *asm_out_file, rtx pat)
7575 rtx src = SET_SRC (pat);
7576 rtx dest = SET_DEST (pat);
7577 int src_regno, dest_regno;
7579 /* Look for the ALLOC insn. */
7580 if (GET_CODE (src) == UNSPEC_VOLATILE
7581 && XINT (src, 1) == UNSPECV_ALLOC
7582 && GET_CODE (dest) == REG)
7584 dest_regno = REGNO (dest);
7586 /* If this isn't the final destination for ar.pfs, the alloc
7587 shouldn't have been marked frame related. */
7588 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7589 abort ();
7591 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7592 ia64_dbx_register_number (dest_regno));
7593 return 1;
7596 /* Look for SP = .... */
7597 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7599 if (GET_CODE (src) == PLUS)
7601 rtx op0 = XEXP (src, 0);
7602 rtx op1 = XEXP (src, 1);
7603 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7605 if (INTVAL (op1) < 0)
7606 fprintf (asm_out_file, "\t.fframe " HOST_WIDE_INT_PRINT_DEC "\n",
7607 -INTVAL (op1));
7608 else
7609 process_epilogue ();
7611 else
7612 abort ();
7614 else if (GET_CODE (src) == REG
7615 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7616 process_epilogue ();
7617 else
7618 abort ();
7620 return 1;
7623 /* Register move we need to look at. */
7624 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7626 src_regno = REGNO (src);
7627 dest_regno = REGNO (dest);
7629 switch (src_regno)
7631 case BR_REG (0):
7632 /* Saving return address pointer. */
7633 if (dest_regno != current_frame_info.reg_save_b0)
7634 abort ();
7635 fprintf (asm_out_file, "\t.save rp, r%d\n",
7636 ia64_dbx_register_number (dest_regno));
7637 return 1;
7639 case PR_REG (0):
7640 if (dest_regno != current_frame_info.reg_save_pr)
7641 abort ();
7642 fprintf (asm_out_file, "\t.save pr, r%d\n",
7643 ia64_dbx_register_number (dest_regno));
7644 return 1;
7646 case AR_UNAT_REGNUM:
7647 if (dest_regno != current_frame_info.reg_save_ar_unat)
7648 abort ();
7649 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7650 ia64_dbx_register_number (dest_regno));
7651 return 1;
7653 case AR_LC_REGNUM:
7654 if (dest_regno != current_frame_info.reg_save_ar_lc)
7655 abort ();
7656 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7657 ia64_dbx_register_number (dest_regno));
7658 return 1;
7660 case STACK_POINTER_REGNUM:
7661 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7662 || ! frame_pointer_needed)
7663 abort ();
7664 fprintf (asm_out_file, "\t.vframe r%d\n",
7665 ia64_dbx_register_number (dest_regno));
7666 return 1;
7668 default:
7669 /* Everything else should indicate being stored to memory. */
7670 abort ();
7674 /* Memory store we need to look at. */
7675 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7677 long off;
7678 rtx base;
7679 const char *saveop;
7681 if (GET_CODE (XEXP (dest, 0)) == REG)
7683 base = XEXP (dest, 0);
7684 off = 0;
7686 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7687 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7689 base = XEXP (XEXP (dest, 0), 0);
7690 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7692 else
7693 abort ();
7695 if (base == hard_frame_pointer_rtx)
7697 saveop = ".savepsp";
7698 off = - off;
7700 else if (base == stack_pointer_rtx)
7701 saveop = ".savesp";
7702 else
7703 abort ();
7705 src_regno = REGNO (src);
7706 switch (src_regno)
7708 case BR_REG (0):
7709 if (current_frame_info.reg_save_b0 != 0)
7710 abort ();
7711 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7712 return 1;
7714 case PR_REG (0):
7715 if (current_frame_info.reg_save_pr != 0)
7716 abort ();
7717 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7718 return 1;
7720 case AR_LC_REGNUM:
7721 if (current_frame_info.reg_save_ar_lc != 0)
7722 abort ();
7723 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7724 return 1;
7726 case AR_PFS_REGNUM:
7727 if (current_frame_info.reg_save_ar_pfs != 0)
7728 abort ();
7729 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7730 return 1;
7732 case AR_UNAT_REGNUM:
7733 if (current_frame_info.reg_save_ar_unat != 0)
7734 abort ();
7735 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7736 return 1;
7738 case GR_REG (4):
7739 case GR_REG (5):
7740 case GR_REG (6):
7741 case GR_REG (7):
7742 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7743 1 << (src_regno - GR_REG (4)));
7744 return 1;
7746 case BR_REG (1):
7747 case BR_REG (2):
7748 case BR_REG (3):
7749 case BR_REG (4):
7750 case BR_REG (5):
7751 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7752 1 << (src_regno - BR_REG (1)));
7753 return 1;
7755 case FR_REG (2):
7756 case FR_REG (3):
7757 case FR_REG (4):
7758 case FR_REG (5):
7759 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7760 1 << (src_regno - FR_REG (2)));
7761 return 1;
7763 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7764 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7765 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7766 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7767 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7768 1 << (src_regno - FR_REG (12)));
7769 return 1;
7771 default:
7772 return 0;
7776 return 0;
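/* Hedged example, with invented register numbers: for a prologue that
   allocates a frame, saves ar.pfs into a general register via alloc, and
   copies b0 into another register, the cases above would emit roughly

        .save ar.pfs, r35
        .fframe 48
        .save rp, r34

   while stores of preserved gr/br/fr registers to memory produce the
   .save.g/.save.b/.save.f/.save.gf mask directives instead.  */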
7780 /* This function looks at a single insn and emits any directives
7781 required to unwind this insn. */
7782 void
7783 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7785 if (flag_unwind_tables
7786 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7788 rtx pat;
7790 if (GET_CODE (insn) == NOTE
7791 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7793 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7795 /* Restore unwind state from immediately before the epilogue. */
7796 if (need_copy_state)
7798 fprintf (asm_out_file, "\t.body\n");
7799 fprintf (asm_out_file, "\t.copy_state 1\n");
7800 need_copy_state = false;
7804 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7805 return;
7807 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7808 if (pat)
7809 pat = XEXP (pat, 0);
7810 else
7811 pat = PATTERN (insn);
7813 switch (GET_CODE (pat))
7815 case SET:
7816 process_set (asm_out_file, pat);
7817 break;
7819 case PARALLEL:
7821 int par_index;
7822 int limit = XVECLEN (pat, 0);
7823 for (par_index = 0; par_index < limit; par_index++)
7825 rtx x = XVECEXP (pat, 0, par_index);
7826 if (GET_CODE (x) == SET)
7827 process_set (asm_out_file, x);
7829 break;
7832 default:
7833 abort ();
7839 void
7840 ia64_init_builtins (void)
7842 tree psi_type_node = build_pointer_type (integer_type_node);
7843 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7845 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7846 tree si_ftype_psi_si_si
7847 = build_function_type_list (integer_type_node,
7848 psi_type_node, integer_type_node,
7849 integer_type_node, NULL_TREE);
7851 /* __sync_val_compare_and_swap_di */
7852 tree di_ftype_pdi_di_di
7853 = build_function_type_list (long_integer_type_node,
7854 pdi_type_node, long_integer_type_node,
7855 long_integer_type_node, NULL_TREE);
7856 /* __sync_bool_compare_and_swap_di */
7857 tree si_ftype_pdi_di_di
7858 = build_function_type_list (integer_type_node,
7859 pdi_type_node, long_integer_type_node,
7860 long_integer_type_node, NULL_TREE);
7861 /* __sync_synchronize */
7862 tree void_ftype_void
7863 = build_function_type (void_type_node, void_list_node);
7865 /* __sync_lock_test_and_set_si */
7866 tree si_ftype_psi_si
7867 = build_function_type_list (integer_type_node,
7868 psi_type_node, integer_type_node, NULL_TREE);
7870 /* __sync_lock_test_and_set_di */
7871 tree di_ftype_pdi_di
7872 = build_function_type_list (long_integer_type_node,
7873 pdi_type_node, long_integer_type_node,
7874 NULL_TREE);
7876 /* __sync_lock_release_si */
7877 tree void_ftype_psi
7878 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7880 /* __sync_lock_release_di */
7881 tree void_ftype_pdi
7882 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
7884 tree fpreg_type;
7885 tree float80_type;
7887 /* The __fpreg type. */
7888 fpreg_type = make_node (REAL_TYPE);
7889 /* ??? The back end should know to load/save __fpreg variables using
7890 the ldf.fill and stf.spill instructions. */
7891 TYPE_PRECISION (fpreg_type) = 96;
7892 layout_type (fpreg_type);
7893 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
7895 /* The __float80 type. */
7896 float80_type = make_node (REAL_TYPE);
7897 TYPE_PRECISION (float80_type) = 96;
7898 layout_type (float80_type);
7899 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
7901 /* The __float128 type. */
7902 if (!TARGET_HPUX)
7904 tree float128_type = make_node (REAL_TYPE);
7905 TYPE_PRECISION (float128_type) = 128;
7906 layout_type (float128_type);
7907 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
7909 else
7910 /* Under HPUX, this is a synonym for "long double". */
7911 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
7912 "__float128");
7914 #define def_builtin(name, type, code) \
7915 builtin_function ((name), (type), (code), BUILT_IN_MD, NULL, NULL_TREE)
7917 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7918 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7919 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7920 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7921 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7922 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7923 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7924 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7926 def_builtin ("__sync_synchronize", void_ftype_void,
7927 IA64_BUILTIN_SYNCHRONIZE);
7929 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7930 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7931 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7932 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7933 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7934 IA64_BUILTIN_LOCK_RELEASE_SI);
7935 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7936 IA64_BUILTIN_LOCK_RELEASE_DI);
7938 def_builtin ("__builtin_ia64_bsp",
7939 build_function_type (ptr_type_node, void_list_node),
7940 IA64_BUILTIN_BSP);
7942 def_builtin ("__builtin_ia64_flushrs",
7943 build_function_type (void_type_node, void_list_node),
7944 IA64_BUILTIN_FLUSHRS);
7946 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7947 IA64_BUILTIN_FETCH_AND_ADD_SI);
7948 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7949 IA64_BUILTIN_FETCH_AND_SUB_SI);
7950 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7951 IA64_BUILTIN_FETCH_AND_OR_SI);
7952 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7953 IA64_BUILTIN_FETCH_AND_AND_SI);
7954 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7955 IA64_BUILTIN_FETCH_AND_XOR_SI);
7956 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
7957 IA64_BUILTIN_FETCH_AND_NAND_SI);
7959 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
7960 IA64_BUILTIN_ADD_AND_FETCH_SI);
7961 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
7962 IA64_BUILTIN_SUB_AND_FETCH_SI);
7963 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
7964 IA64_BUILTIN_OR_AND_FETCH_SI);
7965 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
7966 IA64_BUILTIN_AND_AND_FETCH_SI);
7967 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
7968 IA64_BUILTIN_XOR_AND_FETCH_SI);
7969 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
7970 IA64_BUILTIN_NAND_AND_FETCH_SI);
7972 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
7973 IA64_BUILTIN_FETCH_AND_ADD_DI);
7974 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
7975 IA64_BUILTIN_FETCH_AND_SUB_DI);
7976 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
7977 IA64_BUILTIN_FETCH_AND_OR_DI);
7978 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
7979 IA64_BUILTIN_FETCH_AND_AND_DI);
7980 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
7981 IA64_BUILTIN_FETCH_AND_XOR_DI);
7982 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
7983 IA64_BUILTIN_FETCH_AND_NAND_DI);
7985 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
7986 IA64_BUILTIN_ADD_AND_FETCH_DI);
7987 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
7988 IA64_BUILTIN_SUB_AND_FETCH_DI);
7989 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
7990 IA64_BUILTIN_OR_AND_FETCH_DI);
7991 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
7992 IA64_BUILTIN_AND_AND_FETCH_DI);
7993 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
7994 IA64_BUILTIN_XOR_AND_FETCH_DI);
7995 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
7996 IA64_BUILTIN_NAND_AND_FETCH_DI);
7998 #undef def_builtin
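/* Usage sketch, not compiler code: how the synchronization builtins
   registered above might appear in application code.  The names
   `lock_word', `spin_acquire' and `spin_release' are invented.  */
#if 0
static int lock_word;

static void
spin_acquire (void)
{
  /* xchg4.acq loop: the builtin returns the previous contents.  */
  while (__sync_lock_test_and_set_si (&lock_word, 1) != 0)
    continue;
}

static void
spin_release (void)
{
  /* Release store of zero (st4.rel).  */
  __sync_lock_release_si (&lock_word);
}
#endif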
8001 /* Expand fetch_and_op intrinsics. The basic code sequence is:
8004 tmp = [ptr];
8005 do {
8006 ret = tmp;
8007 ar.ccv = tmp;
8008 tmp <op>= value;
8009 cmpxchgsz.acq tmp = [ptr], tmp
8010 } while (tmp != ret)
8013 static rtx
8014 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
8015 tree arglist, rtx target)
8017 rtx ret, label, tmp, ccv, insn, mem, value;
8018 tree arg0, arg1;
8020 arg0 = TREE_VALUE (arglist);
8021 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8022 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8023 #ifdef POINTERS_EXTEND_UNSIGNED
8024 if (GET_MODE (mem) != Pmode)
8025 mem = convert_memory_address (Pmode, mem);
8026 #endif
8027 value = expand_expr (arg1, NULL_RTX, mode, 0);
8029 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8030 MEM_VOLATILE_P (mem) = 1;
8032 if (target && register_operand (target, mode))
8033 ret = target;
8034 else
8035 ret = gen_reg_rtx (mode);
8037 emit_insn (gen_mf ());
8039 /* Special case for fetchadd instructions. */
8040 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
8042 if (mode == SImode)
8043 insn = gen_fetchadd_acq_si (ret, mem, value);
8044 else
8045 insn = gen_fetchadd_acq_di (ret, mem, value);
8046 emit_insn (insn);
8047 return ret;
8050 tmp = gen_reg_rtx (mode);
8051 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8052 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8053 emit_move_insn (tmp, mem);
8055 label = gen_label_rtx ();
8056 emit_label (label);
8057 emit_move_insn (ret, tmp);
8058 convert_move (ccv, tmp, /*unsignedp=*/1);
8060 /* Perform the specific operation. Special case NAND by noticing
8061 one_cmpl_optab instead. */
8062 if (binoptab == one_cmpl_optab)
8064 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8065 binoptab = and_optab;
8067 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
8069 if (mode == SImode)
8070 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
8071 else
8072 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
8073 emit_insn (insn);
8075 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
8077 return ret;
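/* Semantics sketch, not compiler code: the RTL emitted above behaves like
   the following C for __sync_fetch_and_add_si.  `cmpxchg4_acq' is an
   invented stand-in for the cmpxchg4.acq instruction: compare [ptr] with
   ar.ccv, store the new value on a match, and return the old contents.  */
#if 0
extern int cmpxchg4_acq (volatile int *ptr, int newval, int ccv);

static int
sketch_fetch_and_add_si (volatile int *ptr, int value)
{
  int tmp = *ptr;
  int ret;

  do
    {
      ret = tmp;                                  /* remember old value   */
      tmp = cmpxchg4_acq (ptr, ret + value, ret); /* ar.ccv = ret         */
    }
  while (tmp != ret);                             /* lost a race; retry   */

  return ret;                                     /* value before the add */
}
#endif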
8080 /* Expand op_and_fetch intrinsics. The basic code sequence is:
8083 tmp = [ptr];
8084 do {
8085 old = tmp;
8086 ar.ccv = tmp;
8087 ret = tmp <op> value;
8088 cmpxchgsz.acq tmp = [ptr], ret
8089 } while (tmp != old)
8092 static rtx
8093 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
8094 tree arglist, rtx target)
8096 rtx old, label, tmp, ret, ccv, insn, mem, value;
8097 tree arg0, arg1;
8099 arg0 = TREE_VALUE (arglist);
8100 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8101 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8102 #ifdef POINTERS_EXTEND_UNSIGNED
8104 if (GET_MODE (mem) != Pmode)
8104 mem = convert_memory_address (Pmode, mem);
8105 #endif
8107 value = expand_expr (arg1, NULL_RTX, mode, 0);
8109 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8110 MEM_VOLATILE_P (mem) = 1;
8112 if (target && ! register_operand (target, mode))
8113 target = NULL_RTX;
8115 emit_insn (gen_mf ());
8116 tmp = gen_reg_rtx (mode);
8117 old = gen_reg_rtx (mode);
8118 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8119 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8121 emit_move_insn (tmp, mem);
8123 label = gen_label_rtx ();
8124 emit_label (label);
8125 emit_move_insn (old, tmp);
8126 convert_move (ccv, tmp, /*unsignedp=*/1);
8128 /* Perform the specific operation. Special case NAND by noticing
8129 one_cmpl_optab instead. */
8130 if (binoptab == one_cmpl_optab)
8132 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8133 binoptab = and_optab;
8135 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
8137 if (mode == SImode)
8138 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
8139 else
8140 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
8141 emit_insn (insn);
8143 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
8145 return ret;
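/* Companion note to the sketch above: the op_and_fetch variant uses the
   same compare-and-swap loop but returns the value *after* the operation,
   e.g. `old + value' for __sync_add_and_fetch_si rather than `old'.  */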
8148 /* Expand val_ and bool_compare_and_swap. For val_ we want:
8150 ar.ccv = oldval
8152 cmpxchgsz.acq ret = [ptr], newval, ar.ccv
8153 return ret
8155 For bool_ it's the same except return ret == oldval.
8158 static rtx
8159 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
8160 int boolp, tree arglist, rtx target)
8162 tree arg0, arg1, arg2;
8163 rtx mem, old, new, ccv, tmp, insn;
8165 arg0 = TREE_VALUE (arglist);
8166 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8167 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8168 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8169 old = expand_expr (arg1, NULL_RTX, mode, 0);
8170 new = expand_expr (arg2, NULL_RTX, mode, 0);
8172 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8173 MEM_VOLATILE_P (mem) = 1;
8175 if (GET_MODE (old) != mode)
8176 old = convert_to_mode (mode, old, /*unsignedp=*/1);
8177 if (GET_MODE (new) != mode)
8178 new = convert_to_mode (mode, new, /*unsignedp=*/1);
8180 if (! register_operand (old, mode))
8181 old = copy_to_mode_reg (mode, old);
8182 if (! register_operand (new, mode))
8183 new = copy_to_mode_reg (mode, new);
8185 if (! boolp && target && register_operand (target, mode))
8186 tmp = target;
8187 else
8188 tmp = gen_reg_rtx (mode);
8190 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8191 convert_move (ccv, old, /*unsignedp=*/1);
8192 emit_insn (gen_mf ());
8193 if (mode == SImode)
8194 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
8195 else
8196 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
8197 emit_insn (insn);
8199 if (boolp)
8201 if (! target)
8202 target = gen_reg_rtx (rmode);
8203 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
8205 else
8206 return tmp;
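/* Usage sketch, not compiler code: the val_ builtins return the previous
   memory contents, the bool_ builtins whether the swap took place.  The
   function and parameter names below are invented.  */
#if 0
static int
try_update (int *slot, int expected, int desired)
{
  /* Roughly: set ar.ccv to `expected', fence, then cmpxchg4.acq.  */
  return __sync_bool_compare_and_swap_si (slot, expected, desired);
}
#endif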
8209 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
8211 static rtx
8212 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
8213 rtx target)
8215 tree arg0, arg1;
8216 rtx mem, new, ret, insn;
8218 arg0 = TREE_VALUE (arglist);
8219 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8220 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8221 new = expand_expr (arg1, NULL_RTX, mode, 0);
8223 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8224 MEM_VOLATILE_P (mem) = 1;
8225 if (! register_operand (new, mode))
8226 new = copy_to_mode_reg (mode, new);
8228 if (target && register_operand (target, mode))
8229 ret = target;
8230 else
8231 ret = gen_reg_rtx (mode);
8233 if (mode == SImode)
8234 insn = gen_xchgsi (ret, mem, new);
8235 else
8236 insn = gen_xchgdi (ret, mem, new);
8237 emit_insn (insn);
8239 return ret;
8242 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
8244 static rtx
8245 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
8246 rtx target ATTRIBUTE_UNUSED)
8248 tree arg0;
8249 rtx mem;
8251 arg0 = TREE_VALUE (arglist);
8252 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8254 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8255 MEM_VOLATILE_P (mem) = 1;
8257 emit_move_insn (mem, const0_rtx);
8259 return const0_rtx;
8262 rtx
8263 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8264 enum machine_mode mode ATTRIBUTE_UNUSED,
8265 int ignore ATTRIBUTE_UNUSED)
8267 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8268 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8269 tree arglist = TREE_OPERAND (exp, 1);
8270 enum machine_mode rmode = VOIDmode;
8272 switch (fcode)
8274 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8275 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8276 mode = SImode;
8277 rmode = SImode;
8278 break;
8280 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8281 case IA64_BUILTIN_LOCK_RELEASE_SI:
8282 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8283 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8284 case IA64_BUILTIN_FETCH_AND_OR_SI:
8285 case IA64_BUILTIN_FETCH_AND_AND_SI:
8286 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8287 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8288 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8289 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8290 case IA64_BUILTIN_OR_AND_FETCH_SI:
8291 case IA64_BUILTIN_AND_AND_FETCH_SI:
8292 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8293 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8294 mode = SImode;
8295 break;
8297 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8298 mode = DImode;
8299 rmode = SImode;
8300 break;
8302 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8303 mode = DImode;
8304 rmode = DImode;
8305 break;
8307 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8308 case IA64_BUILTIN_LOCK_RELEASE_DI:
8309 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8310 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8311 case IA64_BUILTIN_FETCH_AND_OR_DI:
8312 case IA64_BUILTIN_FETCH_AND_AND_DI:
8313 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8314 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8315 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8316 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8317 case IA64_BUILTIN_OR_AND_FETCH_DI:
8318 case IA64_BUILTIN_AND_AND_FETCH_DI:
8319 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8320 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8321 mode = DImode;
8322 break;
8324 default:
8325 break;
8328 switch (fcode)
8330 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8331 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8332 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8333 target);
8335 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8336 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8337 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8338 target);
8340 case IA64_BUILTIN_SYNCHRONIZE:
8341 emit_insn (gen_mf ());
8342 return const0_rtx;
8344 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8345 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8346 return ia64_expand_lock_test_and_set (mode, arglist, target);
8348 case IA64_BUILTIN_LOCK_RELEASE_SI:
8349 case IA64_BUILTIN_LOCK_RELEASE_DI:
8350 return ia64_expand_lock_release (mode, arglist, target);
8352 case IA64_BUILTIN_BSP:
8353 if (! target || ! register_operand (target, DImode))
8354 target = gen_reg_rtx (DImode);
8355 emit_insn (gen_bsp_value (target));
8356 #ifdef POINTERS_EXTEND_UNSIGNED
8357 target = convert_memory_address (ptr_mode, target);
8358 #endif
8359 return target;
8361 case IA64_BUILTIN_FLUSHRS:
8362 emit_insn (gen_flushrs ());
8363 return const0_rtx;
8365 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8366 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8367 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8369 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8370 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8371 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8373 case IA64_BUILTIN_FETCH_AND_OR_SI:
8374 case IA64_BUILTIN_FETCH_AND_OR_DI:
8375 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8377 case IA64_BUILTIN_FETCH_AND_AND_SI:
8378 case IA64_BUILTIN_FETCH_AND_AND_DI:
8379 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8381 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8382 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8383 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8385 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8386 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8387 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8389 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8390 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8391 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8393 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8394 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8395 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8397 case IA64_BUILTIN_OR_AND_FETCH_SI:
8398 case IA64_BUILTIN_OR_AND_FETCH_DI:
8399 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8401 case IA64_BUILTIN_AND_AND_FETCH_SI:
8402 case IA64_BUILTIN_AND_AND_FETCH_DI:
8403 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8405 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8406 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8407 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8409 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8410 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8411 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
8413 default:
8414 break;
8417 return NULL_RTX;
8420 /* On HP-UX IA64, aggregate parameters are passed in the most
8421 significant bits of the stack slot. */
8423 enum direction
8424 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8426 /* Exception to normal case for structures/unions/etc. */
8428 if (type && AGGREGATE_TYPE_P (type)
8429 && int_size_in_bytes (type) < UNITS_PER_WORD)
8430 return upward;
8432 /* Fall back to the default. */
8433 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
8436 /* Linked list of all external functions that are to be emitted by GCC.
8437 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8438 order to avoid putting out names that are never really used. */
8440 struct extern_func_list
8442 struct extern_func_list *next; /* next external */
8443 char *name; /* name of the external */
8444 } *extern_func_head = 0;
8446 static void
8447 ia64_hpux_add_extern_decl (const char *name)
8449 struct extern_func_list *p;
8451 p = (struct extern_func_list *) xmalloc (sizeof (struct extern_func_list));
8452 p->name = xmalloc (strlen (name) + 1);
8453 strcpy (p->name, name);
8454 p->next = extern_func_head;
8455 extern_func_head = p;
8458 /* Print out the list of used global functions. */
8460 static void
8461 ia64_hpux_file_end (void)
8463 while (extern_func_head)
8465 const char *real_name;
8466 tree decl;
8468 real_name = (* targetm.strip_name_encoding) (extern_func_head->name);
8469 decl = maybe_get_identifier (real_name);
8471 if (!decl
8472 || (! TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (decl)))
8474 if (decl)
8475 TREE_ASM_WRITTEN (decl) = 1;
8476 (*targetm.asm_out.globalize_label) (asm_out_file,
8477 extern_func_head->name);
8478 fputs (TYPE_ASM_OP, asm_out_file);
8479 assemble_name (asm_out_file, extern_func_head->name);
8480 putc (',', asm_out_file);
8481 fprintf (asm_out_file, TYPE_OPERAND_FMT, "function");
8482 putc ('\n', asm_out_file);
8484 extern_func_head = extern_func_head->next;
8488 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8490 static void
8491 ia64_hpux_init_libfuncs (void)
8493 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8494 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8495 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8496 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8497 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8498 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8499 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8500 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8502 /* ia64_expand_compare uses this. */
8503 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8505 /* These should never be used. */
8506 set_optab_libfunc (eq_optab, TFmode, 0);
8507 set_optab_libfunc (ne_optab, TFmode, 0);
8508 set_optab_libfunc (gt_optab, TFmode, 0);
8509 set_optab_libfunc (ge_optab, TFmode, 0);
8510 set_optab_libfunc (lt_optab, TFmode, 0);
8511 set_optab_libfunc (le_optab, TFmode, 0);
8513 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8514 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8515 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8516 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8517 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8518 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8520 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8521 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8522 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8523 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8525 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8526 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8529 /* Rename the division and modulus functions in VMS. */
8531 static void
8532 ia64_vms_init_libfuncs (void)
8534 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8535 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8536 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8537 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8538 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8539 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8540 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8541 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8544 /* Switch to the section to which we should output X. The only thing
8545 special we do here is to honor small data. */
8547 static void
8548 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8549 unsigned HOST_WIDE_INT align)
8551 if (GET_MODE_SIZE (mode) > 0
8552 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8553 sdata_section ();
8554 else
8555 default_elf_select_rtx_section (mode, x, align);
8558 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8559 Pretend flag_pic is always set. */
8561 static void
8562 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8564 default_elf_select_section_1 (exp, reloc, align, true);
8567 static void
8568 ia64_rwreloc_unique_section (tree decl, int reloc)
8570 default_unique_section_1 (decl, reloc, true);
8573 static void
8574 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8575 unsigned HOST_WIDE_INT align)
8577 int save_pic = flag_pic;
8578 flag_pic = 1;
8579 ia64_select_rtx_section (mode, x, align);
8580 flag_pic = save_pic;
8583 static unsigned int
8584 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8586 return default_section_type_flags_1 (decl, name, reloc, true);
8590 /* Output the assembler code for a thunk function. THUNK_DECL is the
8591 declaration for the thunk function itself, FUNCTION is the decl for
8592 the target function. DELTA is an immediate constant offset to be
8593 added to THIS. If VCALL_OFFSET is nonzero, the word at
8594 *(*this + vcall_offset) should be added to THIS. */
8596 static void
8597 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8598 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8599 tree function)
8601 rtx this, insn, funexp;
8603 reload_completed = 1;
8604 epilogue_completed = 1;
8605 no_new_pseudos = 1;
8607 /* Set things up as ia64_expand_prologue might. */
8608 last_scratch_gr_reg = 15;
8610 memset (&current_frame_info, 0, sizeof (current_frame_info));
8611 current_frame_info.spill_cfa_off = -16;
8612 current_frame_info.n_input_regs = 1;
8613 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8615 if (!TARGET_REG_NAMES)
8616 reg_names[IN_REG (0)] = ia64_reg_numbers[0];
8618 /* Mark the end of the (empty) prologue. */
8619 emit_note (NOTE_INSN_PROLOGUE_END);
8621 this = gen_rtx_REG (Pmode, IN_REG (0));
8622 if (TARGET_ILP32)
8624 rtx tmp = gen_rtx_REG (ptr_mode, IN_REG (0));
8625 REG_POINTER (tmp) = 1;
8626 if (delta && CONST_OK_FOR_I (delta))
8628 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8629 delta = 0;
8631 else
8632 emit_insn (gen_ptr_extend (this, tmp));
8635 /* Apply the constant offset, if required. */
8636 if (delta)
8638 rtx delta_rtx = GEN_INT (delta);
8640 if (!CONST_OK_FOR_I (delta))
8642 rtx tmp = gen_rtx_REG (Pmode, 2);
8643 emit_move_insn (tmp, delta_rtx);
8644 delta_rtx = tmp;
8646 emit_insn (gen_adddi3 (this, this, delta_rtx));
8649 /* Apply the offset from the vtable, if required. */
8650 if (vcall_offset)
8652 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8653 rtx tmp = gen_rtx_REG (Pmode, 2);
8655 if (TARGET_ILP32)
8657 rtx t = gen_rtx_REG (ptr_mode, 2);
8658 REG_POINTER (t) = 1;
8659 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8660 if (CONST_OK_FOR_I (vcall_offset))
8662 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8663 vcall_offset_rtx));
8664 vcall_offset = 0;
8666 else
8667 emit_insn (gen_ptr_extend (tmp, t));
8669 else
8670 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8672 if (vcall_offset)
8674 if (!CONST_OK_FOR_J (vcall_offset))
8676 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8677 emit_move_insn (tmp2, vcall_offset_rtx);
8678 vcall_offset_rtx = tmp2;
8680 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8683 if (TARGET_ILP32)
8684 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8685 gen_rtx_MEM (ptr_mode, tmp));
8686 else
8687 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8689 emit_insn (gen_adddi3 (this, this, tmp));
8692 /* Generate a tail call to the target function. */
8693 if (! TREE_USED (function))
8695 assemble_external (function);
8696 TREE_USED (function) = 1;
8698 funexp = XEXP (DECL_RTL (function), 0);
8699 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8700 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8701 insn = get_last_insn ();
8702 SIBLING_CALL_P (insn) = 1;
8704 /* Code generation for calls relies on splitting. */
8705 reload_completed = 1;
8706 epilogue_completed = 1;
8707 try_split (PATTERN (insn), insn, 0);
8709 emit_barrier ();
8711 /* Run just enough of rest_of_compilation to get the insns emitted.
8712 There's not really enough bulk here to make other passes such as
8713 instruction scheduling worthwhile. Note that use_thunk calls
8714 assemble_start_function and assemble_end_function. */
8716 insn_locators_initialize ();
8717 emit_all_insn_group_barriers (NULL);
8718 insn = get_insns ();
8719 shorten_branches (insn);
8720 final_start_function (insn, file, 1);
8721 final (insn, file, 1, 0);
8722 final_end_function ();
8724 reload_completed = 0;
8725 epilogue_completed = 0;
8726 no_new_pseudos = 0;
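/* Rough C-level picture of what the emitted thunk does, not compiler
   code: adjust `this' by DELTA, optionally add a value fetched through
   the vtable at VCALL_OFFSET, then tail-call the real function.  The
   names below are invented for the illustration.  */
#if 0
extern void real_function (void *this_ptr);

static void
thunk_sketch (void *this_ptr, long delta, long vcall_offset)
{
  char *p = (char *) this_ptr + delta;             /* constant adjustment */

  if (vcall_offset)
    p += *(long *) (*(char **) p + vcall_offset);  /* vtable-based addend */

  real_function (p);                               /* becomes a sibcall   */
}
#endif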
8729 #include "gt-ia64.h"