2003-09-04 Eric Christopher <echristo@redhat.com>
[official-gcc.git] / gcc / config / ia64 / ia64.c
blob4561345e46e3e4c9509cd8e80f0e84b289349748
1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by James E. Wilson <wilson@cygnus.com> and
4 David Mosberger <davidm@hpl.hp.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "except.h"
41 #include "function.h"
42 #include "ggc.h"
43 #include "basic-block.h"
44 #include "toplev.h"
45 #include "sched-int.h"
46 #include "timevar.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "hashtab.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
54 /* This is used for communication between ASM_OUTPUT_LABEL and
55 ASM_OUTPUT_LABELREF. */
56 int ia64_asm_output_label = 0;
58 /* Define the information needed to generate branch and scc insns. This is
59 stored from the compare operation. */
60 struct rtx_def * ia64_compare_op0;
61 struct rtx_def * ia64_compare_op1;
63 /* Register names for ia64_expand_prologue. */
64 static const char * const ia64_reg_numbers[96] =
65 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
66 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
67 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
68 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
69 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
70 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
71 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
72 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
73 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
74 "r104","r105","r106","r107","r108","r109","r110","r111",
75 "r112","r113","r114","r115","r116","r117","r118","r119",
76 "r120","r121","r122","r123","r124","r125","r126","r127"};
78 /* ??? These strings could be shared with REGISTER_NAMES. */
79 static const char * const ia64_input_reg_names[8] =
80 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
82 /* ??? These strings could be shared with REGISTER_NAMES. */
83 static const char * const ia64_local_reg_names[80] =
84 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
85 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
86 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
87 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
88 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
89 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
90 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
91 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
92 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
93 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
95 /* ??? These strings could be shared with REGISTER_NAMES. */
96 static const char * const ia64_output_reg_names[8] =
97 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
99 /* String used with the -mfixed-range= option. */
100 const char *ia64_fixed_range_string;
102 /* Determines whether we use adds, addl, or movl to generate our
103 TLS immediate offsets. */
104 int ia64_tls_size = 22;
106 /* String used with the -mtls-size= option. */
107 const char *ia64_tls_size_string;
109 /* Which cpu are we scheduling for. */
110 enum processor_type ia64_tune;
112 /* String used with the -tune= option. */
113 const char *ia64_tune_string;
115 /* Determines whether we run our final scheduling pass or not. We always
116 avoid the normal second scheduling pass. */
117 static int ia64_flag_schedule_insns2;
119 /* Variables which are this size or smaller are put in the sdata/sbss
120 sections. */
122 unsigned int ia64_section_threshold;
124 /* The following variable is used by the DFA insn scheduler. The value is
125 TRUE if we do insn bundling instead of insn scheduling. */
126 int bundling_p = 0;
128 /* Structure to be filled in by ia64_compute_frame_size with register
129 save masks and offsets for the current function. */
131 struct ia64_frame_info
133 HOST_WIDE_INT total_size; /* size of the stack frame, not including
134 the caller's scratch area. */
135 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
136 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
137 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
138 HARD_REG_SET mask; /* mask of saved registers. */
139 unsigned int gr_used_mask; /* mask of registers in use as gr spill
140 registers or long-term scratches. */
141 int n_spilled; /* number of spilled registers. */
142 int reg_fp; /* register for fp. */
143 int reg_save_b0; /* save register for b0. */
144 int reg_save_pr; /* save register for prs. */
145 int reg_save_ar_pfs; /* save register for ar.pfs. */
146 int reg_save_ar_unat; /* save register for ar.unat. */
147 int reg_save_ar_lc; /* save register for ar.lc. */
148 int reg_save_gp; /* save register for gp. */
149 int n_input_regs; /* number of input registers used. */
150 int n_local_regs; /* number of local registers used. */
151 int n_output_regs; /* number of output registers used. */
152 int n_rotate_regs; /* number of rotating registers used. */
154 char need_regstk; /* true if a .regstk directive needed. */
155 char initialized; /* true if the data is finalized. */
158 /* Current frame information calculated by ia64_compute_frame_size. */
159 static struct ia64_frame_info current_frame_info;
161 static int ia64_use_dfa_pipeline_interface (void);
162 static int ia64_first_cycle_multipass_dfa_lookahead (void);
163 static void ia64_dependencies_evaluation_hook (rtx, rtx);
164 static void ia64_init_dfa_pre_cycle_insn (void);
165 static rtx ia64_dfa_pre_cycle_insn (void);
166 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
167 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
168 static rtx gen_tls_get_addr (void);
169 static rtx gen_thread_pointer (void);
170 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
171 static int find_gr_spill (int);
172 static int next_scratch_gr_reg (void);
173 static void mark_reg_gr_used_mask (rtx, void *);
174 static void ia64_compute_frame_size (HOST_WIDE_INT);
175 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
176 static void finish_spill_pointers (void);
177 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
178 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
179 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
180 static rtx gen_movdi_x (rtx, rtx, rtx);
181 static rtx gen_fr_spill_x (rtx, rtx, rtx);
182 static rtx gen_fr_restore_x (rtx, rtx, rtx);
184 static enum machine_mode hfa_element_mode (tree, int);
185 static bool ia64_function_ok_for_sibcall (tree, tree);
186 static bool ia64_rtx_costs (rtx, int, int, int *);
187 static void fix_range (const char *);
188 static struct machine_function * ia64_init_machine_status (void);
189 static void emit_insn_group_barriers (FILE *);
190 static void emit_all_insn_group_barriers (FILE *);
191 static void final_emit_insn_group_barriers (FILE *);
192 static void emit_predicate_relation_info (void);
193 static void ia64_reorg (void);
194 static bool ia64_in_small_data_p (tree);
195 static void process_epilogue (void);
196 static int process_set (FILE *, rtx);
198 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
199 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
200 static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
201 int, tree, rtx);
202 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
203 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
204 static bool ia64_assemble_integer (rtx, unsigned int, int);
205 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
206 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
207 static void ia64_output_function_end_prologue (FILE *);
209 static int ia64_issue_rate (void);
210 static int ia64_adjust_cost (rtx, rtx, rtx, int);
211 static void ia64_sched_init (FILE *, int, int);
212 static void ia64_sched_finish (FILE *, int);
213 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
214 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
215 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
216 static int ia64_variable_issue (FILE *, int, rtx, int);
218 static struct bundle_state *get_free_bundle_state (void);
219 static void free_bundle_state (struct bundle_state *);
220 static void initiate_bundle_states (void);
221 static void finish_bundle_states (void);
222 static unsigned bundle_state_hash (const void *);
223 static int bundle_state_eq_p (const void *, const void *);
224 static int insert_bundle_state (struct bundle_state *);
225 static void initiate_bundle_state_table (void);
226 static void finish_bundle_state_table (void);
227 static int try_issue_nops (struct bundle_state *, int);
228 static int try_issue_insn (struct bundle_state *, rtx);
229 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
230 static int get_max_pos (state_t);
231 static int get_template (state_t, int);
233 static rtx get_next_important_insn (rtx, rtx);
234 static void bundling (FILE *, int, rtx, rtx);
236 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
237 HOST_WIDE_INT, tree);
238 static void ia64_file_start (void);
240 static void ia64_select_rtx_section (enum machine_mode, rtx,
241 unsigned HOST_WIDE_INT);
242 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
243 ATTRIBUTE_UNUSED;
244 static void ia64_rwreloc_unique_section (tree, int)
245 ATTRIBUTE_UNUSED;
246 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
247 unsigned HOST_WIDE_INT)
248 ATTRIBUTE_UNUSED;
249 static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
250 ATTRIBUTE_UNUSED;
252 static void ia64_hpux_add_extern_decl (const char *name)
253 ATTRIBUTE_UNUSED;
254 static void ia64_hpux_file_end (void)
255 ATTRIBUTE_UNUSED;
257 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
258 static void ia64_encode_section_info (tree, rtx, int);
261 /* Table of valid machine attributes. */
262 static const struct attribute_spec ia64_attribute_table[] =
264 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
265 { "syscall_linkage", 0, 0, false, true, true, NULL },
266 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
267 { NULL, 0, 0, false, false, false, NULL }
270 /* Initialize the GCC target structure. */
271 #undef TARGET_ATTRIBUTE_TABLE
272 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
274 #undef TARGET_INIT_BUILTINS
275 #define TARGET_INIT_BUILTINS ia64_init_builtins
277 #undef TARGET_EXPAND_BUILTIN
278 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
280 #undef TARGET_ASM_BYTE_OP
281 #define TARGET_ASM_BYTE_OP "\tdata1\t"
282 #undef TARGET_ASM_ALIGNED_HI_OP
283 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
284 #undef TARGET_ASM_ALIGNED_SI_OP
285 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
286 #undef TARGET_ASM_ALIGNED_DI_OP
287 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
288 #undef TARGET_ASM_UNALIGNED_HI_OP
289 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
290 #undef TARGET_ASM_UNALIGNED_SI_OP
291 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
292 #undef TARGET_ASM_UNALIGNED_DI_OP
293 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
294 #undef TARGET_ASM_INTEGER
295 #define TARGET_ASM_INTEGER ia64_assemble_integer
297 #undef TARGET_ASM_FUNCTION_PROLOGUE
298 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
299 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
300 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
301 #undef TARGET_ASM_FUNCTION_EPILOGUE
302 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
304 #undef TARGET_IN_SMALL_DATA_P
305 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
307 #undef TARGET_SCHED_ADJUST_COST
308 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
309 #undef TARGET_SCHED_ISSUE_RATE
310 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
311 #undef TARGET_SCHED_VARIABLE_ISSUE
312 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
313 #undef TARGET_SCHED_INIT
314 #define TARGET_SCHED_INIT ia64_sched_init
315 #undef TARGET_SCHED_FINISH
316 #define TARGET_SCHED_FINISH ia64_sched_finish
317 #undef TARGET_SCHED_REORDER
318 #define TARGET_SCHED_REORDER ia64_sched_reorder
319 #undef TARGET_SCHED_REORDER2
320 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
322 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
323 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
325 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
326 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE ia64_use_dfa_pipeline_interface
328 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
329 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
331 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
332 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
333 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
334 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
336 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
337 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
338 ia64_first_cycle_multipass_dfa_lookahead_guard
340 #undef TARGET_SCHED_DFA_NEW_CYCLE
341 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
343 #ifdef HAVE_AS_TLS
344 #undef TARGET_HAVE_TLS
345 #define TARGET_HAVE_TLS true
346 #endif
348 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
349 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
351 #undef TARGET_ASM_OUTPUT_MI_THUNK
352 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
353 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
354 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
356 #undef TARGET_ASM_FILE_START
357 #define TARGET_ASM_FILE_START ia64_file_start
359 #undef TARGET_RTX_COSTS
360 #define TARGET_RTX_COSTS ia64_rtx_costs
361 #undef TARGET_ADDRESS_COST
362 #define TARGET_ADDRESS_COST hook_int_rtx_0
364 #undef TARGET_MACHINE_DEPENDENT_REORG
365 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
367 #undef TARGET_ENCODE_SECTION_INFO
368 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
370 struct gcc_target targetm = TARGET_INITIALIZER;
372 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
375 call_operand (rtx op, enum machine_mode mode)
377 if (mode != GET_MODE (op) && mode != VOIDmode)
378 return 0;
380 return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG
381 || (GET_CODE (op) == SUBREG && GET_CODE (XEXP (op, 0)) == REG));
384 /* Return 1 if OP refers to a symbol in the sdata section. */
387 sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
389 switch (GET_CODE (op))
391 case CONST:
392 if (GET_CODE (XEXP (op, 0)) != PLUS
393 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
394 break;
395 op = XEXP (XEXP (op, 0), 0);
396 /* FALLTHRU */
398 case SYMBOL_REF:
399 if (CONSTANT_POOL_ADDRESS_P (op))
400 return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
401 else
402 return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);
404 default:
405 break;
408 return 0;
412 small_addr_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
414 return SYMBOL_REF_SMALL_ADDR_P (op);
417 /* Return 1 if OP refers to a symbol, and is appropriate for a GOT load. */
420 got_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
422 switch (GET_CODE (op))
424 case CONST:
425 op = XEXP (op, 0);
426 if (GET_CODE (op) != PLUS)
427 return 0;
428 if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
429 return 0;
430 op = XEXP (op, 1);
431 if (GET_CODE (op) != CONST_INT)
432 return 0;
434 return 1;
436 /* Ok if we're not using GOT entries at all. */
437 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
438 return 1;
440 /* "Ok" while emitting rtl, since otherwise we won't be provided
441 with the entire offset during emission, which makes it very
442 hard to split the offset into high and low parts. */
443 if (rtx_equal_function_value_matters)
444 return 1;
446 /* Force the low 14 bits of the constant to zero so that we do not
447 use up so many GOT entries. */
448 return (INTVAL (op) & 0x3fff) == 0;
450 case SYMBOL_REF:
451 if (SYMBOL_REF_SMALL_ADDR_P (op))
452 return 0;
453 case LABEL_REF:
454 return 1;
456 default:
457 break;
459 return 0;
462 /* Return 1 if OP refers to a symbol. */
465 symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
467 switch (GET_CODE (op))
469 case CONST:
470 case SYMBOL_REF:
471 case LABEL_REF:
472 return 1;
474 default:
475 break;
477 return 0;
480 /* Return tls_model if OP refers to a TLS symbol. */
483 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
485 if (GET_CODE (op) != SYMBOL_REF)
486 return 0;
487 return SYMBOL_REF_TLS_MODEL (op);
491 /* Return 1 if OP refers to a function. */
494 function_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
496 if (GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (op))
497 return 1;
498 else
499 return 0;
502 /* Return 1 if OP is setjmp or a similar function. */
504 /* ??? This is an unsatisfying solution. Should rethink. */
507 setjmp_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
509 const char *name;
510 int retval = 0;
512 if (GET_CODE (op) != SYMBOL_REF)
513 return 0;
515 name = XSTR (op, 0);
517 /* The following code is borrowed from special_function_p in calls.c. */
519 /* Disregard prefix _, __ or __x. */
520 if (name[0] == '_')
522 if (name[1] == '_' && name[2] == 'x')
523 name += 3;
524 else if (name[1] == '_')
525 name += 2;
526 else
527 name += 1;
530 if (name[0] == 's')
532 retval
533 = ((name[1] == 'e'
534 && (! strcmp (name, "setjmp")
535 || ! strcmp (name, "setjmp_syscall")))
536 || (name[1] == 'i'
537 && ! strcmp (name, "sigsetjmp"))
538 || (name[1] == 'a'
539 && ! strcmp (name, "savectx")));
541 else if ((name[0] == 'q' && name[1] == 's'
542 && ! strcmp (name, "qsetjmp"))
543 || (name[0] == 'v' && name[1] == 'f'
544 && ! strcmp (name, "vfork")))
545 retval = 1;
547 return retval;
550 /* Return 1 if OP is a general operand, excluding tls symbolic operands. */
553 move_operand (rtx op, enum machine_mode mode)
555 return general_operand (op, mode) && !tls_symbolic_operand (op, mode);
558 /* Return 1 if OP is a register operand that is (or could be) a GR reg. */
561 gr_register_operand (rtx op, enum machine_mode mode)
563 if (! register_operand (op, mode))
564 return 0;
565 if (GET_CODE (op) == SUBREG)
566 op = SUBREG_REG (op);
567 if (GET_CODE (op) == REG)
569 unsigned int regno = REGNO (op);
570 if (regno < FIRST_PSEUDO_REGISTER)
571 return GENERAL_REGNO_P (regno);
573 return 1;
576 /* Return 1 if OP is a register operand that is (or could be) an FR reg. */
579 fr_register_operand (rtx op, enum machine_mode mode)
581 if (! register_operand (op, mode))
582 return 0;
583 if (GET_CODE (op) == SUBREG)
584 op = SUBREG_REG (op);
585 if (GET_CODE (op) == REG)
587 unsigned int regno = REGNO (op);
588 if (regno < FIRST_PSEUDO_REGISTER)
589 return FR_REGNO_P (regno);
591 return 1;
594 /* Return 1 if OP is a register operand that is (or could be) a GR/FR reg. */
597 grfr_register_operand (rtx op, enum machine_mode mode)
599 if (! register_operand (op, mode))
600 return 0;
601 if (GET_CODE (op) == SUBREG)
602 op = SUBREG_REG (op);
603 if (GET_CODE (op) == REG)
605 unsigned int regno = REGNO (op);
606 if (regno < FIRST_PSEUDO_REGISTER)
607 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
609 return 1;
612 /* Return 1 if OP is a nonimmediate operand that is (or could be) a GR reg. */
615 gr_nonimmediate_operand (rtx op, enum machine_mode mode)
617 if (! nonimmediate_operand (op, mode))
618 return 0;
619 if (GET_CODE (op) == SUBREG)
620 op = SUBREG_REG (op);
621 if (GET_CODE (op) == REG)
623 unsigned int regno = REGNO (op);
624 if (regno < FIRST_PSEUDO_REGISTER)
625 return GENERAL_REGNO_P (regno);
627 return 1;
630 /* Return 1 if OP is a nonimmediate operand that is (or could be) a FR reg. */
633 fr_nonimmediate_operand (rtx op, enum machine_mode mode)
635 if (! nonimmediate_operand (op, mode))
636 return 0;
637 if (GET_CODE (op) == SUBREG)
638 op = SUBREG_REG (op);
639 if (GET_CODE (op) == REG)
641 unsigned int regno = REGNO (op);
642 if (regno < FIRST_PSEUDO_REGISTER)
643 return FR_REGNO_P (regno);
645 return 1;
648 /* Return 1 if OP is a nonimmediate operand that is a GR/FR reg. */
651 grfr_nonimmediate_operand (rtx op, enum machine_mode mode)
653 if (! nonimmediate_operand (op, mode))
654 return 0;
655 if (GET_CODE (op) == SUBREG)
656 op = SUBREG_REG (op);
657 if (GET_CODE (op) == REG)
659 unsigned int regno = REGNO (op);
660 if (regno < FIRST_PSEUDO_REGISTER)
661 return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
663 return 1;
666 /* Return 1 if OP is a GR register operand, or zero. */
669 gr_reg_or_0_operand (rtx op, enum machine_mode mode)
671 return (op == const0_rtx || gr_register_operand (op, mode));
674 /* Return 1 if OP is a GR register operand, or a 5 bit immediate operand. */
677 gr_reg_or_5bit_operand (rtx op, enum machine_mode mode)
679 return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 32)
680 || GET_CODE (op) == CONSTANT_P_RTX
681 || gr_register_operand (op, mode));
684 /* Return 1 if OP is a GR register operand, or a 6 bit immediate operand. */
687 gr_reg_or_6bit_operand (rtx op, enum machine_mode mode)
689 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
690 || GET_CODE (op) == CONSTANT_P_RTX
691 || gr_register_operand (op, mode));
694 /* Return 1 if OP is a GR register operand, or an 8 bit immediate operand. */
697 gr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
699 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
700 || GET_CODE (op) == CONSTANT_P_RTX
701 || gr_register_operand (op, mode));
704 /* Return 1 if OP is a GR/FR register operand, or an 8 bit immediate. */
707 grfr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
709 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
710 || GET_CODE (op) == CONSTANT_P_RTX
711 || grfr_register_operand (op, mode));
714 /* Return 1 if OP is a register operand, or an 8 bit adjusted immediate
715 operand. */
718 gr_reg_or_8bit_adjusted_operand (rtx op, enum machine_mode mode)
720 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_L (INTVAL (op)))
721 || GET_CODE (op) == CONSTANT_P_RTX
722 || gr_register_operand (op, mode));
725 /* Return 1 if OP is a register operand, or is valid for both an 8 bit
726 immediate and an 8 bit adjusted immediate operand. This is necessary
727 because when we emit a compare, we don't know what the condition will be,
728 so we need the union of the immediates accepted by GT and LT. */
731 gr_reg_or_8bit_and_adjusted_operand (rtx op, enum machine_mode mode)
733 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))
734 && CONST_OK_FOR_L (INTVAL (op)))
735 || GET_CODE (op) == CONSTANT_P_RTX
736 || gr_register_operand (op, mode));
739 /* Return 1 if OP is a register operand, or a 14 bit immediate operand. */
742 gr_reg_or_14bit_operand (rtx op, enum machine_mode mode)
744 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_I (INTVAL (op)))
745 || GET_CODE (op) == CONSTANT_P_RTX
746 || gr_register_operand (op, mode));
749 /* Return 1 if OP is a register operand, or a 22 bit immediate operand. */
752 gr_reg_or_22bit_operand (rtx op, enum machine_mode mode)
754 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
755 || GET_CODE (op) == CONSTANT_P_RTX
756 || gr_register_operand (op, mode));
759 /* Return 1 if OP is a 6 bit immediate operand. */
762 shift_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
764 return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
765 || GET_CODE (op) == CONSTANT_P_RTX);
768 /* Return 1 if OP is a 5 bit immediate operand. */
771 shift_32bit_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
773 return ((GET_CODE (op) == CONST_INT
774 && (INTVAL (op) >= 0 && INTVAL (op) < 32))
775 || GET_CODE (op) == CONSTANT_P_RTX);
778 /* Return 1 if OP is a 2, 4, 8, or 16 immediate operand. */
781 shladd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
783 return (GET_CODE (op) == CONST_INT
784 && (INTVAL (op) == 2 || INTVAL (op) == 4
785 || INTVAL (op) == 8 || INTVAL (op) == 16));
788 /* Return 1 if OP is a -16, -8, -4, -1, 1, 4, 8, or 16 immediate operand. */
791 fetchadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
793 return (GET_CODE (op) == CONST_INT
794 && (INTVAL (op) == -16 || INTVAL (op) == -8 ||
795 INTVAL (op) == -4 || INTVAL (op) == -1 ||
796 INTVAL (op) == 1 || INTVAL (op) == 4 ||
797 INTVAL (op) == 8 || INTVAL (op) == 16));
800 /* Return 1 if OP is a floating-point constant zero, one, or a register. */
803 fr_reg_or_fp01_operand (rtx op, enum machine_mode mode)
805 return ((GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (op))
806 || fr_register_operand (op, mode));
809 /* Like nonimmediate_operand, but don't allow MEMs that try to use a
810 POST_MODIFY with a REG as displacement. */
813 destination_operand (rtx op, enum machine_mode mode)
815 if (! nonimmediate_operand (op, mode))
816 return 0;
817 if (GET_CODE (op) == MEM
818 && GET_CODE (XEXP (op, 0)) == POST_MODIFY
819 && GET_CODE (XEXP (XEXP (XEXP (op, 0), 1), 1)) == REG)
820 return 0;
821 return 1;
824 /* Like memory_operand, but don't allow post-increments. */
827 not_postinc_memory_operand (rtx op, enum machine_mode mode)
829 return (memory_operand (op, mode)
830 && GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != 'a');
833 /* Return 1 if this is a comparison operator, which accepts a normal 8-bit
834 signed immediate operand. */
837 normal_comparison_operator (register rtx op, enum machine_mode mode)
839 enum rtx_code code = GET_CODE (op);
840 return ((mode == VOIDmode || GET_MODE (op) == mode)
841 && (code == EQ || code == NE
842 || code == GT || code == LE || code == GTU || code == LEU));
845 /* Return 1 if this is a comparison operator, which accepts an adjusted 8-bit
846 signed immediate operand. */
849 adjusted_comparison_operator (register rtx op, enum machine_mode mode)
851 enum rtx_code code = GET_CODE (op);
852 return ((mode == VOIDmode || GET_MODE (op) == mode)
853 && (code == LT || code == GE || code == LTU || code == GEU));
856 /* Return 1 if this is a signed inequality operator. */
859 signed_inequality_operator (register rtx op, enum machine_mode mode)
861 enum rtx_code code = GET_CODE (op);
862 return ((mode == VOIDmode || GET_MODE (op) == mode)
863 && (code == GE || code == GT
864 || code == LE || code == LT));
867 /* Return 1 if this operator is valid for predication. */
870 predicate_operator (register rtx op, enum machine_mode mode)
872 enum rtx_code code = GET_CODE (op);
873 return ((GET_MODE (op) == mode || mode == VOIDmode)
874 && (code == EQ || code == NE));
877 /* Return 1 if this operator can be used in a conditional operation. */
880 condop_operator (register rtx op, enum machine_mode mode)
882 enum rtx_code code = GET_CODE (op);
883 return ((GET_MODE (op) == mode || mode == VOIDmode)
884 && (code == PLUS || code == MINUS || code == AND
885 || code == IOR || code == XOR));
888 /* Return 1 if this is the ar.lc register. */
891 ar_lc_reg_operand (register rtx op, enum machine_mode mode)
893 return (GET_MODE (op) == DImode
894 && (mode == DImode || mode == VOIDmode)
895 && GET_CODE (op) == REG
896 && REGNO (op) == AR_LC_REGNUM);
899 /* Return 1 if this is the ar.ccv register. */
902 ar_ccv_reg_operand (register rtx op, enum machine_mode mode)
904 return ((GET_MODE (op) == mode || mode == VOIDmode)
905 && GET_CODE (op) == REG
906 && REGNO (op) == AR_CCV_REGNUM);
909 /* Return 1 if this is the ar.pfs register. */
912 ar_pfs_reg_operand (register rtx op, enum machine_mode mode)
914 return ((GET_MODE (op) == mode || mode == VOIDmode)
915 && GET_CODE (op) == REG
916 && REGNO (op) == AR_PFS_REGNUM);
919 /* Like general_operand, but don't allow (mem (addressof)). */
922 general_tfmode_operand (rtx op, enum machine_mode mode)
924 if (! general_operand (op, mode))
925 return 0;
926 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
927 return 0;
928 return 1;
931 /* Similarly. */
934 destination_tfmode_operand (rtx op, enum machine_mode mode)
936 if (! destination_operand (op, mode))
937 return 0;
938 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
939 return 0;
940 return 1;
943 /* Similarly. */
946 tfreg_or_fp01_operand (rtx op, enum machine_mode mode)
948 if (GET_CODE (op) == SUBREG)
949 return 0;
950 return fr_reg_or_fp01_operand (op, mode);
953 /* Return 1 if OP is valid as a base register in a reg + offset address. */
956 basereg_operand (rtx op, enum machine_mode mode)
958 /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
959 checks from pa.c basereg_operand as well? Seems to be OK without them
960 in test runs. */
962 return (register_operand (op, mode) &&
963 REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
966 typedef enum
968 ADDR_AREA_NORMAL, /* normal address area */
969 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
971 ia64_addr_area;
973 static GTY(()) tree small_ident1;
974 static GTY(()) tree small_ident2;
976 static void
977 init_idents (void)
979 if (small_ident1 == 0)
981 small_ident1 = get_identifier ("small");
982 small_ident2 = get_identifier ("__small__");
986 /* Retrieve the address area that has been chosen for the given decl. */
988 static ia64_addr_area
989 ia64_get_addr_area (tree decl)
991 tree model_attr;
993 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
994 if (model_attr)
996 tree id;
998 init_idents ();
999 id = TREE_VALUE (TREE_VALUE (model_attr));
1000 if (id == small_ident1 || id == small_ident2)
1001 return ADDR_AREA_SMALL;
1003 return ADDR_AREA_NORMAL;
1006 static tree
1007 ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1009 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
1010 ia64_addr_area area;
1011 tree arg, decl = *node;
1013 init_idents ();
1014 arg = TREE_VALUE (args);
1015 if (arg == small_ident1 || arg == small_ident2)
1017 addr_area = ADDR_AREA_SMALL;
1019 else
1021 warning ("invalid argument of `%s' attribute",
1022 IDENTIFIER_POINTER (name));
1023 *no_add_attrs = true;
1026 switch (TREE_CODE (decl))
1028 case VAR_DECL:
1029 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
1030 == FUNCTION_DECL)
1031 && !TREE_STATIC (decl))
1033 error ("%Ha an address area attribute cannot be specified for "
1034 "local variables", &DECL_SOURCE_LOCATION (decl), decl);
1035 *no_add_attrs = true;
1037 area = ia64_get_addr_area (decl);
1038 if (area != ADDR_AREA_NORMAL && addr_area != area)
1040 error ("%Ha address area of '%s' conflicts with previous "
1041 "declaration", &DECL_SOURCE_LOCATION (decl), decl);
1042 *no_add_attrs = true;
1044 break;
1046 case FUNCTION_DECL:
1047 error ("%Ha address area attribute cannot be specified for functions",
1048 &DECL_SOURCE_LOCATION (decl), decl);
1049 *no_add_attrs = true;
1050 break;
1052 default:
1053 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1054 *no_add_attrs = true;
1055 break;
1058 return NULL_TREE;
1061 static void
1062 ia64_encode_addr_area (tree decl, rtx symbol)
1064 int flags;
1066 flags = SYMBOL_REF_FLAGS (symbol);
1067 switch (ia64_get_addr_area (decl))
1069 case ADDR_AREA_NORMAL: break;
1070 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
1071 default: abort ();
1073 SYMBOL_REF_FLAGS (symbol) = flags;
1076 static void
1077 ia64_encode_section_info (tree decl, rtx rtl, int first)
1079 default_encode_section_info (decl, rtl, first);
1081 if (TREE_CODE (decl) == VAR_DECL
1082 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
1083 ia64_encode_addr_area (decl, XEXP (rtl, 0));
1086 /* Return 1 if the operands of a move are ok. */
1089 ia64_move_ok (rtx dst, rtx src)
1091 /* If we're under init_recog_no_volatile, we'll not be able to use
1092 memory_operand. So check the code directly and don't worry about
1093 the validity of the underlying address, which should have been
1094 checked elsewhere anyway. */
1095 if (GET_CODE (dst) != MEM)
1096 return 1;
1097 if (GET_CODE (src) == MEM)
1098 return 0;
1099 if (register_operand (src, VOIDmode))
1100 return 1;
1102 /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */
1103 if (INTEGRAL_MODE_P (GET_MODE (dst)))
1104 return src == const0_rtx;
1105 else
1106 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
1110 addp4_optimize_ok (rtx op1, rtx op2)
1112 return (basereg_operand (op1, GET_MODE(op1)) !=
1113 basereg_operand (op2, GET_MODE(op2)));
1116 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
1117 Return the length of the field, or <= 0 on failure. */
1120 ia64_depz_field_mask (rtx rop, rtx rshift)
1122 unsigned HOST_WIDE_INT op = INTVAL (rop);
1123 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
1125 /* Get rid of the zero bits we're shifting in. */
1126 op >>= shift;
1128 /* We must now have a solid block of 1's at bit 0. */
1129 return exact_log2 (op + 1);
1132 /* Expand a symbolic constant load. */
1134 void
1135 ia64_expand_load_address (rtx dest, rtx src)
1137 if (tls_symbolic_operand (src, VOIDmode))
1138 abort ();
1139 if (GET_CODE (dest) != REG)
1140 abort ();
1142 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1143 having to pointer-extend the value afterward. Other forms of address
1144 computation below are also more natural to compute as 64-bit quantities.
1145 If we've been given an SImode destination register, change it. */
1146 if (GET_MODE (dest) != Pmode)
1147 dest = gen_rtx_REG (Pmode, REGNO (dest));
1149 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
1151 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
1152 return;
1154 else if (TARGET_AUTO_PIC)
1156 emit_insn (gen_load_gprel64 (dest, src));
1157 return;
1159 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1161 emit_insn (gen_load_fptr (dest, src));
1162 return;
1164 else if (sdata_symbolic_operand (src, VOIDmode))
1166 emit_insn (gen_load_gprel (dest, src));
1167 return;
1170 if (GET_CODE (src) == CONST
1171 && GET_CODE (XEXP (src, 0)) == PLUS
1172 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
1173 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0)
1175 rtx sym = XEXP (XEXP (src, 0), 0);
1176 HOST_WIDE_INT ofs, hi, lo;
1178 /* Split the offset into a sign extended 14-bit low part
1179 and a complementary high part. */
1180 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
1181 lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
1182 hi = ofs - lo;
1184 ia64_expand_load_address (dest, plus_constant (sym, hi));
1185 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
1187 else
1189 rtx tmp;
1191 tmp = gen_rtx_HIGH (Pmode, src);
1192 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1193 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1195 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
1196 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1200 static GTY(()) rtx gen_tls_tga;
1201 static rtx
1202 gen_tls_get_addr (void)
1204 if (!gen_tls_tga)
1205 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1206 return gen_tls_tga;
1209 static GTY(()) rtx thread_pointer_rtx;
1210 static rtx
1211 gen_thread_pointer (void)
1213 if (!thread_pointer_rtx)
1215 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1216 RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
1218 return thread_pointer_rtx;
1221 static rtx
1222 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
1224 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1225 rtx orig_op0 = op0;
1227 switch (tls_kind)
1229 case TLS_MODEL_GLOBAL_DYNAMIC:
1230 start_sequence ();
1232 tga_op1 = gen_reg_rtx (Pmode);
1233 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1234 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1235 RTX_UNCHANGING_P (tga_op1) = 1;
1237 tga_op2 = gen_reg_rtx (Pmode);
1238 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
1239 tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
1240 RTX_UNCHANGING_P (tga_op2) = 1;
1242 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1243 LCT_CONST, Pmode, 2, tga_op1,
1244 Pmode, tga_op2, Pmode);
1246 insns = get_insns ();
1247 end_sequence ();
1249 if (GET_MODE (op0) != Pmode)
1250 op0 = tga_ret;
1251 emit_libcall_block (insns, op0, tga_ret, op1);
1252 break;
1254 case TLS_MODEL_LOCAL_DYNAMIC:
1255 /* ??? This isn't the completely proper way to do local-dynamic
1256 If the call to __tls_get_addr is used only by a single symbol,
1257 then we should (somehow) move the dtprel to the second arg
1258 to avoid the extra add. */
1259 start_sequence ();
1261 tga_op1 = gen_reg_rtx (Pmode);
1262 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
1263 tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
1264 RTX_UNCHANGING_P (tga_op1) = 1;
1266 tga_op2 = const0_rtx;
1268 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1269 LCT_CONST, Pmode, 2, tga_op1,
1270 Pmode, tga_op2, Pmode);
1272 insns = get_insns ();
1273 end_sequence ();
1275 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1276 UNSPEC_LD_BASE);
1277 tmp = gen_reg_rtx (Pmode);
1278 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1280 if (!register_operand (op0, Pmode))
1281 op0 = gen_reg_rtx (Pmode);
1282 if (TARGET_TLS64)
1284 emit_insn (gen_load_dtprel (op0, op1));
1285 emit_insn (gen_adddi3 (op0, tmp, op0));
1287 else
1288 emit_insn (gen_add_dtprel (op0, tmp, op1));
1289 break;
1291 case TLS_MODEL_INITIAL_EXEC:
1292 tmp = gen_reg_rtx (Pmode);
1293 emit_insn (gen_load_ltoff_tprel (tmp, op1));
1294 tmp = gen_rtx_MEM (Pmode, tmp);
1295 RTX_UNCHANGING_P (tmp) = 1;
1296 tmp = force_reg (Pmode, tmp);
1298 if (!register_operand (op0, Pmode))
1299 op0 = gen_reg_rtx (Pmode);
1300 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1301 break;
1303 case TLS_MODEL_LOCAL_EXEC:
1304 if (!register_operand (op0, Pmode))
1305 op0 = gen_reg_rtx (Pmode);
1306 if (TARGET_TLS64)
1308 emit_insn (gen_load_tprel (op0, op1));
1309 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
1311 else
1312 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
1313 break;
1315 default:
1316 abort ();
1319 if (orig_op0 == op0)
1320 return NULL_RTX;
1321 if (GET_MODE (orig_op0) == Pmode)
1322 return op0;
1323 return gen_lowpart (GET_MODE (orig_op0), op0);
1327 ia64_expand_move (rtx op0, rtx op1)
1329 enum machine_mode mode = GET_MODE (op0);
1331 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1332 op1 = force_reg (mode, op1);
1334 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1336 enum tls_model tls_kind;
1337 if ((tls_kind = tls_symbolic_operand (op1, VOIDmode)))
1338 return ia64_expand_tls_address (tls_kind, op0, op1);
1340 if (!TARGET_NO_PIC && reload_completed)
1342 ia64_expand_load_address (op0, op1);
1343 return NULL_RTX;
1347 return op1;
1350 /* Split a move from OP1 to OP0 conditional on COND. */
1352 void
1353 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1355 rtx insn, first = get_last_insn ();
1357 emit_move_insn (op0, op1);
1359 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1360 if (INSN_P (insn))
1361 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1362 PATTERN (insn));
1365 /* Split a post-reload TImode reference into two DImode components. */
1368 ia64_split_timode (rtx out[2], rtx in, rtx scratch)
1370 switch (GET_CODE (in))
1372 case REG:
1373 out[0] = gen_rtx_REG (DImode, REGNO (in));
1374 out[1] = gen_rtx_REG (DImode, REGNO (in) + 1);
1375 return NULL_RTX;
1377 case MEM:
1379 rtx base = XEXP (in, 0);
1381 switch (GET_CODE (base))
1383 case REG:
1384 out[0] = adjust_address (in, DImode, 0);
1385 break;
1386 case POST_MODIFY:
1387 base = XEXP (base, 0);
1388 out[0] = adjust_address (in, DImode, 0);
1389 break;
1391 /* Since we're changing the mode, we need to change to POST_MODIFY
1392 as well to preserve the size of the increment. Either that or
1393 do the update in two steps, but we've already got this scratch
1394 register handy so let's use it. */
1395 case POST_INC:
1396 base = XEXP (base, 0);
1397 out[0]
1398 = change_address (in, DImode,
1399 gen_rtx_POST_MODIFY
1400 (Pmode, base, plus_constant (base, 16)));
1401 break;
1402 case POST_DEC:
1403 base = XEXP (base, 0);
1404 out[0]
1405 = change_address (in, DImode,
1406 gen_rtx_POST_MODIFY
1407 (Pmode, base, plus_constant (base, -16)));
1408 break;
1409 default:
1410 abort ();
1413 if (scratch == NULL_RTX)
1414 abort ();
1415 out[1] = change_address (in, DImode, scratch);
1416 return gen_adddi3 (scratch, base, GEN_INT (8));
1419 case CONST_INT:
1420 case CONST_DOUBLE:
1421 split_double (in, &out[0], &out[1]);
1422 return NULL_RTX;
1424 default:
1425 abort ();
1429 /* ??? Fixing GR->FR TFmode moves during reload is hard. You need to go
1430 through memory plus an extra GR scratch register. Except that you can
1431 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1432 SECONDARY_RELOAD_CLASS, but not both.
1434 We got into problems in the first place by allowing a construct like
1435 (subreg:TF (reg:TI)), which we got from a union containing a long double.
1436 This solution attempts to prevent this situation from occurring. When
1437 we see something like the above, we spill the inner register to memory. */
1440 spill_tfmode_operand (rtx in, int force)
1442 if (GET_CODE (in) == SUBREG
1443 && GET_MODE (SUBREG_REG (in)) == TImode
1444 && GET_CODE (SUBREG_REG (in)) == REG)
1446 rtx mem = gen_mem_addressof (SUBREG_REG (in), NULL_TREE, /*rescan=*/true);
1447 return gen_rtx_MEM (TFmode, copy_to_reg (XEXP (mem, 0)));
1449 else if (force && GET_CODE (in) == REG)
1451 rtx mem = gen_mem_addressof (in, NULL_TREE, /*rescan=*/true);
1452 return gen_rtx_MEM (TFmode, copy_to_reg (XEXP (mem, 0)));
1454 else if (GET_CODE (in) == MEM
1455 && GET_CODE (XEXP (in, 0)) == ADDRESSOF)
1456 return change_address (in, TFmode, copy_to_reg (XEXP (in, 0)));
1457 else
1458 return in;
1461 /* Emit comparison instruction if necessary, returning the expression
1462 that holds the compare result in the proper mode. */
1465 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1467 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1468 rtx cmp;
1470 /* If we have a BImode input, then we already have a compare result, and
1471 do not need to emit another comparison. */
1472 if (GET_MODE (op0) == BImode)
1474 if ((code == NE || code == EQ) && op1 == const0_rtx)
1475 cmp = op0;
1476 else
1477 abort ();
1479 else
1481 cmp = gen_reg_rtx (BImode);
1482 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1483 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1484 code = NE;
1487 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1490 /* Emit the appropriate sequence for a call. */
1492 void
1493 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1494 int sibcall_p)
1496 rtx insn, b0;
1498 addr = XEXP (addr, 0);
1499 b0 = gen_rtx_REG (DImode, R_BR (0));
1501 /* ??? Should do this for functions known to bind local too. */
1502 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1504 if (sibcall_p)
1505 insn = gen_sibcall_nogp (addr);
1506 else if (! retval)
1507 insn = gen_call_nogp (addr, b0);
1508 else
1509 insn = gen_call_value_nogp (retval, addr, b0);
1510 insn = emit_call_insn (insn);
1512 else
1514 if (sibcall_p)
1515 insn = gen_sibcall_gp (addr);
1516 else if (! retval)
1517 insn = gen_call_gp (addr, b0);
1518 else
1519 insn = gen_call_value_gp (retval, addr, b0);
1520 insn = emit_call_insn (insn);
1522 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1525 if (sibcall_p)
1526 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1529 void
1530 ia64_reload_gp (void)
1532 rtx tmp;
1534 if (current_frame_info.reg_save_gp)
1535 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1536 else
1538 HOST_WIDE_INT offset;
1540 offset = (current_frame_info.spill_cfa_off
1541 + current_frame_info.spill_size);
1542 if (frame_pointer_needed)
1544 tmp = hard_frame_pointer_rtx;
1545 offset = -offset;
1547 else
1549 tmp = stack_pointer_rtx;
1550 offset = current_frame_info.total_size - offset;
1553 if (CONST_OK_FOR_I (offset))
1554 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1555 tmp, GEN_INT (offset)));
1556 else
1558 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1559 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1560 pic_offset_table_rtx, tmp));
1563 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1566 emit_move_insn (pic_offset_table_rtx, tmp);
1569 void
1570 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1571 rtx scratch_b, int noreturn_p, int sibcall_p)
1573 rtx insn;
1574 bool is_desc = false;
1576 /* If we find we're calling through a register, then we're actually
1577 calling through a descriptor, so load up the values. */
1578 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1580 rtx tmp;
1581 bool addr_dead_p;
1583 /* ??? We are currently constrained to *not* use peep2, because
1584 we can legitimately change the global lifetime of the GP
1585 (in the form of killing where previously live). This is
1586 because a call through a descriptor doesn't use the previous
1587 value of the GP, while a direct call does, and we do not
1588 commit to either form until the split here.
1590 That said, this means that we lack precise life info for
1591 whether ADDR is dead after this call. This is not terribly
1592 important, since we can fix things up essentially for free
1593 with the POST_DEC below, but it's nice to not use it when we
1594 can immediately tell it's not necessary. */
1595 addr_dead_p = ((noreturn_p || sibcall_p
1596 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1597 REGNO (addr)))
1598 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1600 /* Load the code address into scratch_b. */
1601 tmp = gen_rtx_POST_INC (Pmode, addr);
1602 tmp = gen_rtx_MEM (Pmode, tmp);
1603 emit_move_insn (scratch_r, tmp);
1604 emit_move_insn (scratch_b, scratch_r);
1606 /* Load the GP address. If ADDR is not dead here, then we must
1607 revert the change made above via the POST_INCREMENT. */
1608 if (!addr_dead_p)
1609 tmp = gen_rtx_POST_DEC (Pmode, addr);
1610 else
1611 tmp = addr;
1612 tmp = gen_rtx_MEM (Pmode, tmp);
1613 emit_move_insn (pic_offset_table_rtx, tmp);
1615 is_desc = true;
1616 addr = scratch_b;
1619 if (sibcall_p)
1620 insn = gen_sibcall_nogp (addr);
1621 else if (retval)
1622 insn = gen_call_value_nogp (retval, addr, retaddr);
1623 else
1624 insn = gen_call_nogp (addr, retaddr);
1625 emit_call_insn (insn);
1627 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1628 ia64_reload_gp ();
1631 /* Begin the assembly file. */
1633 static void
1634 ia64_file_start (void)
1636 default_file_start ();
1637 emit_safe_across_calls ();
1640 void
1641 emit_safe_across_calls (void)
1643 unsigned int rs, re;
1644 int out_state;
1646 rs = 1;
1647 out_state = 0;
1648 while (1)
1650 while (rs < 64 && call_used_regs[PR_REG (rs)])
1651 rs++;
1652 if (rs >= 64)
1653 break;
1654 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1655 continue;
1656 if (out_state == 0)
1658 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1659 out_state = 1;
1661 else
1662 fputc (',', asm_out_file);
1663 if (re == rs + 1)
1664 fprintf (asm_out_file, "p%u", rs);
1665 else
1666 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1667 rs = re + 1;
1669 if (out_state)
1670 fputc ('\n', asm_out_file);
1673 /* Helper function for ia64_compute_frame_size: find an appropriate general
1674 register to spill some special register to. SPECIAL_SPILL_MASK contains
1675 bits in GR0 to GR31 that have already been allocated by this routine.
1676 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1678 static int
1679 find_gr_spill (int try_locals)
1681 int regno;
1683 /* If this is a leaf function, first try an otherwise unused
1684 call-clobbered register. */
1685 if (current_function_is_leaf)
1687 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1688 if (! regs_ever_live[regno]
1689 && call_used_regs[regno]
1690 && ! fixed_regs[regno]
1691 && ! global_regs[regno]
1692 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1694 current_frame_info.gr_used_mask |= 1 << regno;
1695 return regno;
1699 if (try_locals)
1701 regno = current_frame_info.n_local_regs;
1702 /* If there is a frame pointer, then we can't use loc79, because
1703 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1704 reg_name switching code in ia64_expand_prologue. */
1705 if (regno < (80 - frame_pointer_needed))
1707 current_frame_info.n_local_regs = regno + 1;
1708 return LOC_REG (0) + regno;
1712 /* Failed to find a general register to spill to. Must use stack. */
1713 return 0;
1716 /* In order to make for nice schedules, we try to allocate every temporary
1717 to a different register. We must of course stay away from call-saved,
1718 fixed, and global registers. We must also stay away from registers
1719 allocated in current_frame_info.gr_used_mask, since those include regs
1720 used all through the prologue.
1722 Any register allocated here must be used immediately. The idea is to
1723 aid scheduling, not to solve data flow problems. */
1725 static int last_scratch_gr_reg;
1727 static int
1728 next_scratch_gr_reg (void)
1730 int i, regno;
1732 for (i = 0; i < 32; ++i)
1734 regno = (last_scratch_gr_reg + i + 1) & 31;
1735 if (call_used_regs[regno]
1736 && ! fixed_regs[regno]
1737 && ! global_regs[regno]
1738 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1740 last_scratch_gr_reg = regno;
1741 return regno;
1745 /* There must be _something_ available. */
1746 abort ();
1749 /* Helper function for ia64_compute_frame_size, called through
1750 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
1752 static void
1753 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
1755 unsigned int regno = REGNO (reg);
1756 if (regno < 32)
1758 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1759 for (i = 0; i < n; ++i)
1760 current_frame_info.gr_used_mask |= 1 << (regno + i);
1764 /* Returns the number of bytes offset between the frame pointer and the stack
1765 pointer for the current function. SIZE is the number of bytes of space
1766 needed for local variables. */
1768 static void
1769 ia64_compute_frame_size (HOST_WIDE_INT size)
1771 HOST_WIDE_INT total_size;
1772 HOST_WIDE_INT spill_size = 0;
1773 HOST_WIDE_INT extra_spill_size = 0;
1774 HOST_WIDE_INT pretend_args_size;
1775 HARD_REG_SET mask;
1776 int n_spilled = 0;
1777 int spilled_gr_p = 0;
1778 int spilled_fr_p = 0;
1779 unsigned int regno;
1780 int i;
1782 if (current_frame_info.initialized)
1783 return;
1785 memset (&current_frame_info, 0, sizeof current_frame_info);
1786 CLEAR_HARD_REG_SET (mask);
1788 /* Don't allocate scratches to the return register. */
1789 diddle_return_value (mark_reg_gr_used_mask, NULL);
1791 /* Don't allocate scratches to the EH scratch registers. */
1792 if (cfun->machine->ia64_eh_epilogue_sp)
1793 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
1794 if (cfun->machine->ia64_eh_epilogue_bsp)
1795 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
1797 /* Find the size of the register stack frame. We have only 80 local
1798 registers, because we reserve 8 for the inputs and 8 for the
1799 outputs. */
1801 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
1802 since we'll be adjusting that down later. */
1803 regno = LOC_REG (78) + ! frame_pointer_needed;
1804 for (; regno >= LOC_REG (0); regno--)
1805 if (regs_ever_live[regno])
1806 break;
1807 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
1809 /* For functions marked with the syscall_linkage attribute, we must mark
1810 all eight input registers as in use, so that locals aren't visible to
1811 the caller. */
1813 if (cfun->machine->n_varargs > 0
1814 || lookup_attribute ("syscall_linkage",
1815 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
1816 current_frame_info.n_input_regs = 8;
1817 else
1819 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
1820 if (regs_ever_live[regno])
1821 break;
1822 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
1825 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
1826 if (regs_ever_live[regno])
1827 break;
1828 i = regno - OUT_REG (0) + 1;
1830 /* When -p profiling, we need one output register for the mcount argument.
1831 Likewise for -a profiling for the bb_init_func argument. For -ax
1832 profiling, we need two output registers for the two bb_init_trace_func
1833 arguments. */
1834 if (current_function_profile)
1835 i = MAX (i, 1);
1836 current_frame_info.n_output_regs = i;
1838 /* ??? No rotating register support yet. */
1839 current_frame_info.n_rotate_regs = 0;
1841 /* Discover which registers need spilling, and how much room that
1842 will take. Begin with floating point and general registers,
1843 which will always wind up on the stack. */
1845 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
1846 if (regs_ever_live[regno] && ! call_used_regs[regno])
1848 SET_HARD_REG_BIT (mask, regno);
1849 spill_size += 16;
1850 n_spilled += 1;
1851 spilled_fr_p = 1;
1854 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1855 if (regs_ever_live[regno] && ! call_used_regs[regno])
1857 SET_HARD_REG_BIT (mask, regno);
1858 spill_size += 8;
1859 n_spilled += 1;
1860 spilled_gr_p = 1;
1863 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
1864 if (regs_ever_live[regno] && ! call_used_regs[regno])
1866 SET_HARD_REG_BIT (mask, regno);
1867 spill_size += 8;
1868 n_spilled += 1;
1871 /* Now come all special registers that might get saved in other
1872 general registers. */
1874 if (frame_pointer_needed)
1876 current_frame_info.reg_fp = find_gr_spill (1);
1877 /* If we did not get a register, then we take LOC79. This is guaranteed
1878 to be free, even if regs_ever_live is already set, because this is
1879 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
1880 as we don't count loc79 above. */
1881 if (current_frame_info.reg_fp == 0)
1883 current_frame_info.reg_fp = LOC_REG (79);
1884 current_frame_info.n_local_regs++;
1888 if (! current_function_is_leaf)
1890 /* Emit a save of BR0 if we call other functions. Do this even
1891 if this function doesn't return, as EH depends on this to be
1892 able to unwind the stack. */
1893 SET_HARD_REG_BIT (mask, BR_REG (0));
1895 current_frame_info.reg_save_b0 = find_gr_spill (1);
1896 if (current_frame_info.reg_save_b0 == 0)
1898 spill_size += 8;
1899 n_spilled += 1;
1902 /* Similarly for ar.pfs. */
1903 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1904 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1905 if (current_frame_info.reg_save_ar_pfs == 0)
1907 extra_spill_size += 8;
1908 n_spilled += 1;
1911 /* Similarly for gp. Note that if we're calling setjmp, the stacked
1912 registers are clobbered, so we fall back to the stack. */
1913 current_frame_info.reg_save_gp
1914 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
1915 if (current_frame_info.reg_save_gp == 0)
1917 SET_HARD_REG_BIT (mask, GR_REG (1));
1918 spill_size += 8;
1919 n_spilled += 1;
1922 else
1924 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
1926 SET_HARD_REG_BIT (mask, BR_REG (0));
1927 spill_size += 8;
1928 n_spilled += 1;
1931 if (regs_ever_live[AR_PFS_REGNUM])
1933 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1934 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1935 if (current_frame_info.reg_save_ar_pfs == 0)
1937 extra_spill_size += 8;
1938 n_spilled += 1;
1943 /* Unwind descriptor hackery: things are most efficient if we allocate
1944 consecutive GR save registers for RP, PFS, FP in that order. However,
1945 it is absolutely critical that FP get the only hard register that's
1946 guaranteed to be free, so we allocated it first. If all three did
1947 happen to be allocated hard regs, and are consecutive, rearrange them
1948 into the preferred order now. */
1949 if (current_frame_info.reg_fp != 0
1950 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
1951 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
1953 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
1954 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
1955 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
1958 /* See if we need to store the predicate register block. */
1959 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
1960 if (regs_ever_live[regno] && ! call_used_regs[regno])
1961 break;
1962 if (regno <= PR_REG (63))
1964 SET_HARD_REG_BIT (mask, PR_REG (0));
1965 current_frame_info.reg_save_pr = find_gr_spill (1);
1966 if (current_frame_info.reg_save_pr == 0)
1968 extra_spill_size += 8;
1969 n_spilled += 1;
1972 /* ??? Mark them all as used so that register renaming and such
1973 are free to use them. */
1974 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
1975 regs_ever_live[regno] = 1;
1978 /* If we're forced to use st8.spill, we're forced to save and restore
1979 ar.unat as well. The check for existing liveness allows inline asm
1980 to touch ar.unat. */
1981 if (spilled_gr_p || cfun->machine->n_varargs
1982 || regs_ever_live[AR_UNAT_REGNUM])
1984 regs_ever_live[AR_UNAT_REGNUM] = 1;
1985 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
1986 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
1987 if (current_frame_info.reg_save_ar_unat == 0)
1989 extra_spill_size += 8;
1990 n_spilled += 1;
1994 if (regs_ever_live[AR_LC_REGNUM])
1996 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
1997 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
1998 if (current_frame_info.reg_save_ar_lc == 0)
2000 extra_spill_size += 8;
2001 n_spilled += 1;
2005 /* If we have an odd number of words of pretend arguments written to
2006 the stack, then the FR save area will be unaligned. We round the
2007 size of this area up to keep things 16 byte aligned. */
2008 if (spilled_fr_p)
2009 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2010 else
2011 pretend_args_size = current_function_pretend_args_size;
2013 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2014 + current_function_outgoing_args_size);
2015 total_size = IA64_STACK_ALIGN (total_size);
2017 /* We always use the 16-byte scratch area provided by the caller, but
2018 if we are a leaf function, there's no one to which we need to provide
2019 a scratch area. */
2020 if (current_function_is_leaf)
2021 total_size = MAX (0, total_size - 16);
2023 current_frame_info.total_size = total_size;
2024 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2025 current_frame_info.spill_size = spill_size;
2026 current_frame_info.extra_spill_size = extra_spill_size;
2027 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2028 current_frame_info.n_spilled = n_spilled;
2029 current_frame_info.initialized = reload_completed;
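/* Illustrative example (assumed numbers; IA64_STACK_ALIGN is assumed to
   round up to a multiple of 16): spill_size == 24, extra_spill_size == 8,
   size == 40, no pretend args and 16 bytes of outgoing args give
   total_size == IA64_STACK_ALIGN (88) == 96.  A leaf function whose
   aligned total is only the 16-byte scratch area ends up with
   total_size == 0.  */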
2032 /* Compute the initial difference between the specified pair of registers. */
2034 HOST_WIDE_INT
2035 ia64_initial_elimination_offset (int from, int to)
2037 HOST_WIDE_INT offset;
2039 ia64_compute_frame_size (get_frame_size ());
2040 switch (from)
2042 case FRAME_POINTER_REGNUM:
2043 if (to == HARD_FRAME_POINTER_REGNUM)
2045 if (current_function_is_leaf)
2046 offset = -current_frame_info.total_size;
2047 else
2048 offset = -(current_frame_info.total_size
2049 - current_function_outgoing_args_size - 16);
2051 else if (to == STACK_POINTER_REGNUM)
2053 if (current_function_is_leaf)
2054 offset = 0;
2055 else
2056 offset = 16 + current_function_outgoing_args_size;
2058 else
2059 abort ();
2060 break;
2062 case ARG_POINTER_REGNUM:
2063 /* Arguments start above the 16 byte save area, unless stdarg,
2064 in which case we store through the 16 byte save area. */
2065 if (to == HARD_FRAME_POINTER_REGNUM)
2066 offset = 16 - current_function_pretend_args_size;
2067 else if (to == STACK_POINTER_REGNUM)
2068 offset = (current_frame_info.total_size
2069 + 16 - current_function_pretend_args_size);
2070 else
2071 abort ();
2072 break;
2074 default:
2075 abort ();
2078 return offset;
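/* Illustrative example (assumed numbers): for a non-leaf function with
   total_size == 64, 16 bytes of outgoing args and no pretend args,
   FRAME -> HARD_FRAME is -(64 - 16 - 16) == -32, FRAME -> STACK is
   16 + 16 == 32, ARG -> HARD_FRAME is 16 and ARG -> STACK is
   64 + 16 == 80.  */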
2081 /* If there are more than a trivial number of register spills, we use
2082 two interleaved iterators so that we can get two memory references
2083 per insn group.
2085 In order to simplify things in the prologue and epilogue expanders,
2086 we use helper functions to fix up the memory references after the
2087 fact with the appropriate offsets to a POST_MODIFY memory mode.
2088 The following data structure tracks the state of the two iterators
2089 while insns are being emitted. */
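/* Illustrative sketch (not from the original source): with two iterators
   and four consecutive 8-byte slots, iterator 0 handles the 1st and 3rd
   slot and iterator 1 the 2nd and 4th, so each iterator advances by 16
   via POST_MODIFY and adjacent spills use independent base registers,
   allowing two memory references per insn group.  */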
2091 struct spill_fill_data
2093 rtx init_after; /* point at which to emit initializations */
2094 rtx init_reg[2]; /* initial base register */
2095 rtx iter_reg[2]; /* the iterator registers */
2096 rtx *prev_addr[2]; /* address of last memory use */
2097 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2098 HOST_WIDE_INT prev_off[2]; /* last offset */
2099 int n_iter; /* number of iterators in use */
2100 int next_iter; /* next iterator to use */
2101 unsigned int save_gr_used_mask;
2104 static struct spill_fill_data spill_fill_data;
2106 static void
2107 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2109 int i;
2111 spill_fill_data.init_after = get_last_insn ();
2112 spill_fill_data.init_reg[0] = init_reg;
2113 spill_fill_data.init_reg[1] = init_reg;
2114 spill_fill_data.prev_addr[0] = NULL;
2115 spill_fill_data.prev_addr[1] = NULL;
2116 spill_fill_data.prev_insn[0] = NULL;
2117 spill_fill_data.prev_insn[1] = NULL;
2118 spill_fill_data.prev_off[0] = cfa_off;
2119 spill_fill_data.prev_off[1] = cfa_off;
2120 spill_fill_data.next_iter = 0;
2121 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2123 spill_fill_data.n_iter = 1 + (n_spills > 2);
2124 for (i = 0; i < spill_fill_data.n_iter; ++i)
2126 int regno = next_scratch_gr_reg ();
2127 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2128 current_frame_info.gr_used_mask |= 1 << regno;
2132 static void
2133 finish_spill_pointers (void)
2135 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2138 static rtx
2139 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2141 int iter = spill_fill_data.next_iter;
2142 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2143 rtx disp_rtx = GEN_INT (disp);
2144 rtx mem;
2146 if (spill_fill_data.prev_addr[iter])
2148 if (CONST_OK_FOR_N (disp))
2150 *spill_fill_data.prev_addr[iter]
2151 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2152 gen_rtx_PLUS (DImode,
2153 spill_fill_data.iter_reg[iter],
2154 disp_rtx));
2155 REG_NOTES (spill_fill_data.prev_insn[iter])
2156 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2157 REG_NOTES (spill_fill_data.prev_insn[iter]));
2159 else
2161 /* ??? Could use register post_modify for loads. */
2162 if (! CONST_OK_FOR_I (disp))
2164 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2165 emit_move_insn (tmp, disp_rtx);
2166 disp_rtx = tmp;
2168 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2169 spill_fill_data.iter_reg[iter], disp_rtx));
2172 /* Micro-optimization: if we've created a frame pointer, it's at
2173 CFA 0, which may allow the real iterator to be initialized lower,
2174 slightly increasing parallelism. Also, if there are few saves
2175 it may eliminate the iterator entirely. */
2176 else if (disp == 0
2177 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2178 && frame_pointer_needed)
2180 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2181 set_mem_alias_set (mem, get_varargs_alias_set ());
2182 return mem;
2184 else
2186 rtx seq, insn;
2188 if (disp == 0)
2189 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2190 spill_fill_data.init_reg[iter]);
2191 else
2193 start_sequence ();
2195 if (! CONST_OK_FOR_I (disp))
2197 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2198 emit_move_insn (tmp, disp_rtx);
2199 disp_rtx = tmp;
2202 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2203 spill_fill_data.init_reg[iter],
2204 disp_rtx));
2206 seq = get_insns ();
2207 end_sequence ();
2210 /* Careful for being the first insn in a sequence. */
2211 if (spill_fill_data.init_after)
2212 insn = emit_insn_after (seq, spill_fill_data.init_after);
2213 else
2215 rtx first = get_insns ();
2216 if (first)
2217 insn = emit_insn_before (seq, first);
2218 else
2219 insn = emit_insn (seq);
2221 spill_fill_data.init_after = insn;
2223 /* If DISP is 0, we may or may not have a further adjustment
2224 afterward. If we do, then the load/store insn may be modified
2225 to be a post-modify. If we don't, then this copy may be
2226 eliminated by copyprop_hardreg_forward, which makes this
2227 insn garbage, which runs afoul of the sanity check in
2228 propagate_one_insn. So mark this insn as legal to delete. */
2229 if (disp == 0)
2230 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2231 REG_NOTES (insn));
2234 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2236 /* ??? Not all of the spills are for varargs, but some of them are.
2237 The rest of the spills belong in an alias set of their own. But
2238 it doesn't actually hurt to include them here. */
2239 set_mem_alias_set (mem, get_varargs_alias_set ());
2241 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2242 spill_fill_data.prev_off[iter] = cfa_off;
2244 if (++iter >= spill_fill_data.n_iter)
2245 iter = 0;
2246 spill_fill_data.next_iter = iter;
2248 return mem;
2251 static void
2252 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2253 rtx frame_reg)
2255 int iter = spill_fill_data.next_iter;
2256 rtx mem, insn;
2258 mem = spill_restore_mem (reg, cfa_off);
2259 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2260 spill_fill_data.prev_insn[iter] = insn;
2262 if (frame_reg)
2264 rtx base;
2265 HOST_WIDE_INT off;
2267 RTX_FRAME_RELATED_P (insn) = 1;
2269 /* Don't even pretend that the unwind code can intuit its way
2270 through a pair of interleaved post_modify iterators. Just
2271 provide the correct answer. */
2273 if (frame_pointer_needed)
2275 base = hard_frame_pointer_rtx;
2276 off = - cfa_off;
2278 else
2280 base = stack_pointer_rtx;
2281 off = current_frame_info.total_size - cfa_off;
2284 REG_NOTES (insn)
2285 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2286 gen_rtx_SET (VOIDmode,
2287 gen_rtx_MEM (GET_MODE (reg),
2288 plus_constant (base, off)),
2289 frame_reg),
2290 REG_NOTES (insn));
2294 static void
2295 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2297 int iter = spill_fill_data.next_iter;
2298 rtx insn;
2300 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2301 GEN_INT (cfa_off)));
2302 spill_fill_data.prev_insn[iter] = insn;
2305 /* Wrapper functions that discard the CONST_INT spill offset. These
2306 exist so that we can give gr_spill/gr_fill the offset they need and
2307 use a consistent function interface. */
2309 static rtx
2310 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2312 return gen_movdi (dest, src);
2315 static rtx
2316 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2318 return gen_fr_spill (dest, src);
2321 static rtx
2322 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2324 return gen_fr_restore (dest, src);
2327 /* Called after register allocation to add any instructions needed for the
2328 prologue. Using a prologue insn is favored compared to putting all of the
2329 instructions in output_function_prologue(), since it allows the scheduler
2330 to intermix instructions with the saves of the caller saved registers. In
2331 some cases, it might be necessary to emit a barrier instruction as the last
2332 insn to prevent such scheduling.
2334 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2335 so that the debug info generation code can handle them properly.
2337 The register save area is laid out like so:
2338 cfa+16
2339 [ varargs spill area ]
2340 [ fr register spill area ]
2341 [ br register spill area ]
2342 [ ar register spill area ]
2343 [ pr register spill area ]
2344 [ gr register spill area ] */
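/* Illustrative example (assumed numbers): with no varargs and no pretend
   arguments, spill_cfa_off is -16, so the save area ends at cfa+16; if
   spill_size + extra_spill_size is 32, the saved registers occupy
   [cfa-16, cfa+16), i.e. the caller's 16-byte scratch area plus 16 bytes
   of our own frame.  */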
2346 /* ??? Get inefficient code when the frame size is larger than can fit in an
2347 adds instruction. */
2349 void
2350 ia64_expand_prologue (void)
2352 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2353 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2354 rtx reg, alt_reg;
2356 ia64_compute_frame_size (get_frame_size ());
2357 last_scratch_gr_reg = 15;
2359 /* If there is no epilogue, then we don't need some prologue insns.
2360 We need to avoid emitting the dead prologue insns, because flow
2361 will complain about them. */
2362 if (optimize)
2364 edge e;
2366 for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
2367 if ((e->flags & EDGE_FAKE) == 0
2368 && (e->flags & EDGE_FALLTHRU) != 0)
2369 break;
2370 epilogue_p = (e != NULL);
2372 else
2373 epilogue_p = 1;
2375 /* Set the local, input, and output register names. We need to do this
2376 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2377 half. If we use in/loc/out register names, then we get assembler errors
2378 in crtn.S because there is no alloc insn or regstk directive in there. */
2379 if (! TARGET_REG_NAMES)
2381 int inputs = current_frame_info.n_input_regs;
2382 int locals = current_frame_info.n_local_regs;
2383 int outputs = current_frame_info.n_output_regs;
2385 for (i = 0; i < inputs; i++)
2386 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2387 for (i = 0; i < locals; i++)
2388 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2389 for (i = 0; i < outputs; i++)
2390 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2393 /* Set the frame pointer register name. The regnum is logically loc79,
2394 but of course we'll not have allocated that many locals. Rather than
2395 worrying about renumbering the existing rtxs, we adjust the name. */
2396 /* ??? This code means that we can never use one local register when
2397 there is a frame pointer. loc79 gets wasted in this case, as it is
2398 renamed to a register that will never be used. See also the try_locals
2399 code in find_gr_spill. */
2400 if (current_frame_info.reg_fp)
2402 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2403 reg_names[HARD_FRAME_POINTER_REGNUM]
2404 = reg_names[current_frame_info.reg_fp];
2405 reg_names[current_frame_info.reg_fp] = tmp;
2408 /* We don't need an alloc instruction if we've used no outputs or locals. */
2409 if (current_frame_info.n_local_regs == 0
2410 && current_frame_info.n_output_regs == 0
2411 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2412 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2414 /* If there is no alloc, but there are input registers used, then we
2415 need a .regstk directive. */
2416 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2417 ar_pfs_save_reg = NULL_RTX;
2419 else
2421 current_frame_info.need_regstk = 0;
2423 if (current_frame_info.reg_save_ar_pfs)
2424 regno = current_frame_info.reg_save_ar_pfs;
2425 else
2426 regno = next_scratch_gr_reg ();
2427 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2429 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2430 GEN_INT (current_frame_info.n_input_regs),
2431 GEN_INT (current_frame_info.n_local_regs),
2432 GEN_INT (current_frame_info.n_output_regs),
2433 GEN_INT (current_frame_info.n_rotate_regs)));
2434 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2437 /* Set up frame pointer, stack pointer, and spill iterators. */
2439 n_varargs = cfun->machine->n_varargs;
2440 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2441 stack_pointer_rtx, 0);
2443 if (frame_pointer_needed)
2445 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2446 RTX_FRAME_RELATED_P (insn) = 1;
2449 if (current_frame_info.total_size != 0)
2451 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2452 rtx offset;
2454 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2455 offset = frame_size_rtx;
2456 else
2458 regno = next_scratch_gr_reg ();
2459 offset = gen_rtx_REG (DImode, regno);
2460 emit_move_insn (offset, frame_size_rtx);
2463 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2464 stack_pointer_rtx, offset));
2466 if (! frame_pointer_needed)
2468 RTX_FRAME_RELATED_P (insn) = 1;
2469 if (GET_CODE (offset) != CONST_INT)
2471 REG_NOTES (insn)
2472 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2473 gen_rtx_SET (VOIDmode,
2474 stack_pointer_rtx,
2475 gen_rtx_PLUS (DImode,
2476 stack_pointer_rtx,
2477 frame_size_rtx)),
2478 REG_NOTES (insn));
2482 /* ??? At this point we must generate a magic insn that appears to
2483 modify the stack pointer, the frame pointer, and all spill
2484 iterators. This would allow the most scheduling freedom. For
2485 now, just hard stop. */
2486 emit_insn (gen_blockage ());
2489 /* Must copy out ar.unat before doing any integer spills. */
2490 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2492 if (current_frame_info.reg_save_ar_unat)
2493 ar_unat_save_reg
2494 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2495 else
2497 alt_regno = next_scratch_gr_reg ();
2498 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2499 current_frame_info.gr_used_mask |= 1 << alt_regno;
2502 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2503 insn = emit_move_insn (ar_unat_save_reg, reg);
2504 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2506 /* Even if we're not going to generate an epilogue, we still
2507 need to save the register so that EH works. */
2508 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2509 emit_insn (gen_prologue_use (ar_unat_save_reg));
2511 else
2512 ar_unat_save_reg = NULL_RTX;
2514 /* Spill all varargs registers. Do this before spilling any GR registers,
2515 since we want the UNAT bits for the GR registers to override the UNAT
2516 bits from varargs, which we don't care about. */
2518 cfa_off = -16;
2519 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2521 reg = gen_rtx_REG (DImode, regno);
2522 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2525 /* Locate the bottom of the register save area. */
2526 cfa_off = (current_frame_info.spill_cfa_off
2527 + current_frame_info.spill_size
2528 + current_frame_info.extra_spill_size);
2530 /* Save the predicate register block either in a register or in memory. */
2531 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2533 reg = gen_rtx_REG (DImode, PR_REG (0));
2534 if (current_frame_info.reg_save_pr != 0)
2536 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2537 insn = emit_move_insn (alt_reg, reg);
2539 /* ??? Denote pr spill/fill by a DImode move that modifies all
2540 64 hard registers. */
2541 RTX_FRAME_RELATED_P (insn) = 1;
2542 REG_NOTES (insn)
2543 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2544 gen_rtx_SET (VOIDmode, alt_reg, reg),
2545 REG_NOTES (insn));
2547 /* Even if we're not going to generate an epilogue, we still
2548 need to save the register so that EH works. */
2549 if (! epilogue_p)
2550 emit_insn (gen_prologue_use (alt_reg));
2552 else
2554 alt_regno = next_scratch_gr_reg ();
2555 alt_reg = gen_rtx_REG (DImode, alt_regno);
2556 insn = emit_move_insn (alt_reg, reg);
2557 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2558 cfa_off -= 8;
2562 /* Handle AR regs in numerical order. All of them get special handling. */
2563 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2564 && current_frame_info.reg_save_ar_unat == 0)
2566 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2567 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2568 cfa_off -= 8;
2571 /* The alloc insn already copied ar.pfs into a general register. The
2572 only thing we have to do now is copy that register to a stack slot
2573 if we'd not allocated a local register for the job. */
2574 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2575 && current_frame_info.reg_save_ar_pfs == 0)
2577 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2578 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2579 cfa_off -= 8;
2582 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2584 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2585 if (current_frame_info.reg_save_ar_lc != 0)
2587 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2588 insn = emit_move_insn (alt_reg, reg);
2589 RTX_FRAME_RELATED_P (insn) = 1;
2591 /* Even if we're not going to generate an epilogue, we still
2592 need to save the register so that EH works. */
2593 if (! epilogue_p)
2594 emit_insn (gen_prologue_use (alt_reg));
2596 else
2598 alt_regno = next_scratch_gr_reg ();
2599 alt_reg = gen_rtx_REG (DImode, alt_regno);
2600 emit_move_insn (alt_reg, reg);
2601 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2602 cfa_off -= 8;
2606 if (current_frame_info.reg_save_gp)
2608 insn = emit_move_insn (gen_rtx_REG (DImode,
2609 current_frame_info.reg_save_gp),
2610 pic_offset_table_rtx);
2611 /* We don't know for sure yet if this is actually needed, since
2612 we've not split the PIC call patterns. If all of the calls
2613 are indirect, and not followed by any uses of the gp, then
2614 this save is dead. Allow it to go away. */
2615 REG_NOTES (insn)
2616 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2619 /* We should now be at the base of the gr/br/fr spill area. */
2620 if (cfa_off != (current_frame_info.spill_cfa_off
2621 + current_frame_info.spill_size))
2622 abort ();
2624 /* Spill all general registers. */
2625 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2626 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2628 reg = gen_rtx_REG (DImode, regno);
2629 do_spill (gen_gr_spill, reg, cfa_off, reg);
2630 cfa_off -= 8;
2633 /* Handle BR0 specially -- it may be getting stored permanently in
2634 some GR register. */
2635 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2637 reg = gen_rtx_REG (DImode, BR_REG (0));
2638 if (current_frame_info.reg_save_b0 != 0)
2640 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2641 insn = emit_move_insn (alt_reg, reg);
2642 RTX_FRAME_RELATED_P (insn) = 1;
2644 /* Even if we're not going to generate an epilogue, we still
2645 need to save the register so that EH works. */
2646 if (! epilogue_p)
2647 emit_insn (gen_prologue_use (alt_reg));
2649 else
2651 alt_regno = next_scratch_gr_reg ();
2652 alt_reg = gen_rtx_REG (DImode, alt_regno);
2653 emit_move_insn (alt_reg, reg);
2654 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2655 cfa_off -= 8;
2659 /* Spill the rest of the BR registers. */
2660 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2661 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2663 alt_regno = next_scratch_gr_reg ();
2664 alt_reg = gen_rtx_REG (DImode, alt_regno);
2665 reg = gen_rtx_REG (DImode, regno);
2666 emit_move_insn (alt_reg, reg);
2667 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2668 cfa_off -= 8;
2671 /* Align the frame and spill all FR registers. */
2672 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2673 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2675 if (cfa_off & 15)
2676 abort ();
2677 reg = gen_rtx_REG (TFmode, regno);
2678 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2679 cfa_off -= 16;
2682 if (cfa_off != current_frame_info.spill_cfa_off)
2683 abort ();
2685 finish_spill_pointers ();
2688 /* Called after register allocation to add any instructions needed for the
2689 epilogue. Using an epilogue insn is favored compared to putting all of the
2690 instructions in output_function_epilogue(), since it allows the scheduler
2691 to intermix instructions with the restores of the caller saved registers. In
2692 some cases, it might be necessary to emit a barrier instruction as the last
2693 insn to prevent such scheduling. */
2695 void
2696 ia64_expand_epilogue (int sibcall_p)
2698 rtx insn, reg, alt_reg, ar_unat_save_reg;
2699 int regno, alt_regno, cfa_off;
2701 ia64_compute_frame_size (get_frame_size ());
2703 /* If there is a frame pointer, then we use it instead of the stack
2704 pointer, so that the stack pointer does not need to be valid when
2705 the epilogue starts. See EXIT_IGNORE_STACK. */
2706 if (frame_pointer_needed)
2707 setup_spill_pointers (current_frame_info.n_spilled,
2708 hard_frame_pointer_rtx, 0);
2709 else
2710 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2711 current_frame_info.total_size);
2713 if (current_frame_info.total_size != 0)
2715 /* ??? At this point we must generate a magic insn that appears to
2716 modify the spill iterators and the frame pointer. This would
2717 allow the most scheduling freedom. For now, just hard stop. */
2718 emit_insn (gen_blockage ());
2721 /* Locate the bottom of the register save area. */
2722 cfa_off = (current_frame_info.spill_cfa_off
2723 + current_frame_info.spill_size
2724 + current_frame_info.extra_spill_size);
2726 /* Restore the predicate registers. */
2727 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2729 if (current_frame_info.reg_save_pr != 0)
2730 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2731 else
2733 alt_regno = next_scratch_gr_reg ();
2734 alt_reg = gen_rtx_REG (DImode, alt_regno);
2735 do_restore (gen_movdi_x, alt_reg, cfa_off);
2736 cfa_off -= 8;
2738 reg = gen_rtx_REG (DImode, PR_REG (0));
2739 emit_move_insn (reg, alt_reg);
2742 /* Restore the application registers. */
2744 /* Load the saved unat from the stack, but do not restore it until
2745 after the GRs have been restored. */
2746 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2748 if (current_frame_info.reg_save_ar_unat != 0)
2749 ar_unat_save_reg
2750 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2751 else
2753 alt_regno = next_scratch_gr_reg ();
2754 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2755 current_frame_info.gr_used_mask |= 1 << alt_regno;
2756 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2757 cfa_off -= 8;
2760 else
2761 ar_unat_save_reg = NULL_RTX;
2763 if (current_frame_info.reg_save_ar_pfs != 0)
2765 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2766 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2767 emit_move_insn (reg, alt_reg);
2769 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2771 alt_regno = next_scratch_gr_reg ();
2772 alt_reg = gen_rtx_REG (DImode, alt_regno);
2773 do_restore (gen_movdi_x, alt_reg, cfa_off);
2774 cfa_off -= 8;
2775 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2776 emit_move_insn (reg, alt_reg);
2779 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2781 if (current_frame_info.reg_save_ar_lc != 0)
2782 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2783 else
2785 alt_regno = next_scratch_gr_reg ();
2786 alt_reg = gen_rtx_REG (DImode, alt_regno);
2787 do_restore (gen_movdi_x, alt_reg, cfa_off);
2788 cfa_off -= 8;
2790 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2791 emit_move_insn (reg, alt_reg);
2794 /* We should now be at the base of the gr/br/fr spill area. */
2795 if (cfa_off != (current_frame_info.spill_cfa_off
2796 + current_frame_info.spill_size))
2797 abort ();
2799 /* The GP may be stored on the stack in the prologue, but it's
2800 never restored in the epilogue. Skip the stack slot. */
2801 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2802 cfa_off -= 8;
2804 /* Restore all general registers. */
2805 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2806 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2808 reg = gen_rtx_REG (DImode, regno);
2809 do_restore (gen_gr_restore, reg, cfa_off);
2810 cfa_off -= 8;
2813 /* Restore the branch registers. Handle B0 specially, as it may
2814 have gotten stored in some GR register. */
2815 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2817 if (current_frame_info.reg_save_b0 != 0)
2818 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2819 else
2821 alt_regno = next_scratch_gr_reg ();
2822 alt_reg = gen_rtx_REG (DImode, alt_regno);
2823 do_restore (gen_movdi_x, alt_reg, cfa_off);
2824 cfa_off -= 8;
2826 reg = gen_rtx_REG (DImode, BR_REG (0));
2827 emit_move_insn (reg, alt_reg);
2830 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2831 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2833 alt_regno = next_scratch_gr_reg ();
2834 alt_reg = gen_rtx_REG (DImode, alt_regno);
2835 do_restore (gen_movdi_x, alt_reg, cfa_off);
2836 cfa_off -= 8;
2837 reg = gen_rtx_REG (DImode, regno);
2838 emit_move_insn (reg, alt_reg);
2841 /* Restore floating point registers. */
2842 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2843 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2845 if (cfa_off & 15)
2846 abort ();
2847 reg = gen_rtx_REG (TFmode, regno);
2848 do_restore (gen_fr_restore_x, reg, cfa_off);
2849 cfa_off -= 16;
2852 /* Restore ar.unat for real. */
2853 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2855 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2856 emit_move_insn (reg, ar_unat_save_reg);
2859 if (cfa_off != current_frame_info.spill_cfa_off)
2860 abort ();
2862 finish_spill_pointers ();
2864 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2866 /* ??? At this point we must generate a magic insn that appears to
2867 modify the spill iterators, the stack pointer, and the frame
2868 pointer. This would allow the most scheduling freedom. For now,
2869 just hard stop. */
2870 emit_insn (gen_blockage ());
2873 if (cfun->machine->ia64_eh_epilogue_sp)
2874 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2875 else if (frame_pointer_needed)
2877 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2878 RTX_FRAME_RELATED_P (insn) = 1;
2880 else if (current_frame_info.total_size)
2882 rtx offset, frame_size_rtx;
2884 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2885 if (CONST_OK_FOR_I (current_frame_info.total_size))
2886 offset = frame_size_rtx;
2887 else
2889 regno = next_scratch_gr_reg ();
2890 offset = gen_rtx_REG (DImode, regno);
2891 emit_move_insn (offset, frame_size_rtx);
2894 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2895 offset));
2897 RTX_FRAME_RELATED_P (insn) = 1;
2898 if (GET_CODE (offset) != CONST_INT)
2900 REG_NOTES (insn)
2901 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2902 gen_rtx_SET (VOIDmode,
2903 stack_pointer_rtx,
2904 gen_rtx_PLUS (DImode,
2905 stack_pointer_rtx,
2906 frame_size_rtx)),
2907 REG_NOTES (insn));
2911 if (cfun->machine->ia64_eh_epilogue_bsp)
2912 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2914 if (! sibcall_p)
2915 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2916 else
2918 int fp = GR_REG (2);
2919 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
2920 first available call-clobbered register. If there was a frame pointer
2921 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2922 so we have to make sure we're using the string "r2" when emitting
2923 the register name for the assembler. */
2924 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2925 fp = HARD_FRAME_POINTER_REGNUM;
2927 /* We must emit an alloc to force the input registers to become output
2928 registers. Otherwise, if the callee tries to pass its parameters
2929 through to another call without an intervening alloc, then these
2930 values get lost. */
2931 /* ??? We don't need to preserve all input registers. We only need to
2932 preserve those input registers used as arguments to the sibling call.
2933 It is unclear how to compute that number here. */
2934 if (current_frame_info.n_input_regs != 0)
2935 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2936 GEN_INT (0), GEN_INT (0),
2937 GEN_INT (current_frame_info.n_input_regs),
2938 GEN_INT (0)));
2942 /* Return 1 if br.ret can do all the work required to return from a
2943 function. */
2945 int
2946 ia64_direct_return (void)
2948 if (reload_completed && ! frame_pointer_needed)
2950 ia64_compute_frame_size (get_frame_size ());
2952 return (current_frame_info.total_size == 0
2953 && current_frame_info.n_spilled == 0
2954 && current_frame_info.reg_save_b0 == 0
2955 && current_frame_info.reg_save_pr == 0
2956 && current_frame_info.reg_save_ar_pfs == 0
2957 && current_frame_info.reg_save_ar_unat == 0
2958 && current_frame_info.reg_save_ar_lc == 0);
2960 return 0;
2963 /* Return the magic cookie that we use to hold the return address
2964 during early compilation. */
2966 rtx
2967 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
2969 if (count != 0)
2970 return NULL;
2971 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
2974 /* Split this value after reload, now that we know where the return
2975 address is saved. */
2977 void
2978 ia64_split_return_addr_rtx (rtx dest)
2980 rtx src;
2982 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2984 if (current_frame_info.reg_save_b0 != 0)
2985 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2986 else
2988 HOST_WIDE_INT off;
2989 unsigned int regno;
2991 /* Compute offset from CFA for BR0. */
2992 /* ??? Must be kept in sync with ia64_expand_prologue. */
2993 off = (current_frame_info.spill_cfa_off
2994 + current_frame_info.spill_size);
2995 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2996 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2997 off -= 8;
2999 /* Convert CFA offset to a register based offset. */
3000 if (frame_pointer_needed)
3001 src = hard_frame_pointer_rtx;
3002 else
3004 src = stack_pointer_rtx;
3005 off += current_frame_info.total_size;
3008 /* Load address into scratch register. */
3009 if (CONST_OK_FOR_I (off))
3010 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3011 else
3013 emit_move_insn (dest, GEN_INT (off));
3014 emit_insn (gen_adddi3 (dest, src, dest));
3017 src = gen_rtx_MEM (Pmode, dest);
3020 else
3021 src = gen_rtx_REG (DImode, BR_REG (0));
3023 emit_move_insn (dest, src);
3026 int
3027 ia64_hard_regno_rename_ok (int from, int to)
3029 /* Don't clobber any of the registers we reserved for the prologue. */
3030 if (to == current_frame_info.reg_fp
3031 || to == current_frame_info.reg_save_b0
3032 || to == current_frame_info.reg_save_pr
3033 || to == current_frame_info.reg_save_ar_pfs
3034 || to == current_frame_info.reg_save_ar_unat
3035 || to == current_frame_info.reg_save_ar_lc)
3036 return 0;
3038 if (from == current_frame_info.reg_fp
3039 || from == current_frame_info.reg_save_b0
3040 || from == current_frame_info.reg_save_pr
3041 || from == current_frame_info.reg_save_ar_pfs
3042 || from == current_frame_info.reg_save_ar_unat
3043 || from == current_frame_info.reg_save_ar_lc)
3044 return 0;
3046 /* Don't use output registers outside the register frame. */
3047 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3048 return 0;
3050 /* Retain even/oddness on predicate register pairs. */
3051 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3052 return (from & 1) == (to & 1);
3054 return 1;
3057 /* Target hook for assembling integer objects. Handle word-sized
3058 aligned objects and detect the cases when @fptr is needed. */
3060 static bool
3061 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3063 if (size == (TARGET_ILP32 ? 4 : 8)
3064 && aligned_p
3065 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3066 && GET_CODE (x) == SYMBOL_REF
3067 && SYMBOL_REF_FUNCTION_P (x))
3069 if (TARGET_ILP32)
3070 fputs ("\tdata4\t@fptr(", asm_out_file);
3071 else
3072 fputs ("\tdata8\t@fptr(", asm_out_file);
3073 output_addr_const (asm_out_file, x);
3074 fputs (")\n", asm_out_file);
3075 return true;
3077 return default_assemble_integer (x, size, aligned_p);
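/* Illustrative output (derived from the code above; the symbol name is
   assumed): a word-sized, aligned reference to a function symbol "foo"
   is emitted as
       data8  @fptr(foo)
   (data4 under TARGET_ILP32); everything else falls through to
   default_assemble_integer.  */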
3080 /* Emit the function prologue. */
3082 static void
3083 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3085 int mask, grsave, grsave_prev;
3087 if (current_frame_info.need_regstk)
3088 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3089 current_frame_info.n_input_regs,
3090 current_frame_info.n_local_regs,
3091 current_frame_info.n_output_regs,
3092 current_frame_info.n_rotate_regs);
3094 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3095 return;
3097 /* Emit the .prologue directive. */
3099 mask = 0;
3100 grsave = grsave_prev = 0;
3101 if (current_frame_info.reg_save_b0 != 0)
3103 mask |= 8;
3104 grsave = grsave_prev = current_frame_info.reg_save_b0;
3106 if (current_frame_info.reg_save_ar_pfs != 0
3107 && (grsave_prev == 0
3108 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3110 mask |= 4;
3111 if (grsave_prev == 0)
3112 grsave = current_frame_info.reg_save_ar_pfs;
3113 grsave_prev = current_frame_info.reg_save_ar_pfs;
3115 if (current_frame_info.reg_fp != 0
3116 && (grsave_prev == 0
3117 || current_frame_info.reg_fp == grsave_prev + 1))
3119 mask |= 2;
3120 if (grsave_prev == 0)
3121 grsave = HARD_FRAME_POINTER_REGNUM;
3122 grsave_prev = current_frame_info.reg_fp;
3124 if (current_frame_info.reg_save_pr != 0
3125 && (grsave_prev == 0
3126 || current_frame_info.reg_save_pr == grsave_prev + 1))
3128 mask |= 1;
3129 if (grsave_prev == 0)
3130 grsave = current_frame_info.reg_save_pr;
3133 if (mask)
3134 fprintf (file, "\t.prologue %d, %d\n", mask,
3135 ia64_dbx_register_number (grsave));
3136 else
3137 fputs ("\t.prologue\n", file);
3139 /* Emit a .spill directive, if necessary, to relocate the base of
3140 the register spill area. */
3141 if (current_frame_info.spill_cfa_off != -16)
3142 fprintf (file, "\t.spill %ld\n",
3143 (long) (current_frame_info.spill_cfa_off
3144 + current_frame_info.spill_size));
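/* Illustrative example (assumed register assignments): if b0 is saved in
   r35 and ar.pfs in r36, the tests above set mask to 8|4 == 12 and grsave
   to the b0 save register, so we emit ".prologue 12" followed by the DBX
   number of r35.  */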
3147 /* Emit the .body directive at the scheduled end of the prologue. */
3149 static void
3150 ia64_output_function_end_prologue (FILE *file)
3152 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3153 return;
3155 fputs ("\t.body\n", file);
3158 /* Emit the function epilogue. */
3160 static void
3161 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3162 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3164 int i;
3166 if (current_frame_info.reg_fp)
3168 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3169 reg_names[HARD_FRAME_POINTER_REGNUM]
3170 = reg_names[current_frame_info.reg_fp];
3171 reg_names[current_frame_info.reg_fp] = tmp;
3173 if (! TARGET_REG_NAMES)
3175 for (i = 0; i < current_frame_info.n_input_regs; i++)
3176 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3177 for (i = 0; i < current_frame_info.n_local_regs; i++)
3178 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3179 for (i = 0; i < current_frame_info.n_output_regs; i++)
3180 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3183 current_frame_info.initialized = 0;
3186 int
3187 ia64_dbx_register_number (int regno)
3189 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3190 from its home at loc79 to something inside the register frame. We
3191 must perform the same renumbering here for the debug info. */
3192 if (current_frame_info.reg_fp)
3194 if (regno == HARD_FRAME_POINTER_REGNUM)
3195 regno = current_frame_info.reg_fp;
3196 else if (regno == current_frame_info.reg_fp)
3197 regno = HARD_FRAME_POINTER_REGNUM;
3200 if (IN_REGNO_P (regno))
3201 return 32 + regno - IN_REG (0);
3202 else if (LOC_REGNO_P (regno))
3203 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3204 else if (OUT_REGNO_P (regno))
3205 return (32 + current_frame_info.n_input_regs
3206 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3207 else
3208 return regno;
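/* Illustrative example (assumed frame shape): with 2 input and 3 local
   registers, IN_REG (1) maps to 33, LOC_REG (0) to 34 and OUT_REG (0)
   to 37; registers outside the in/loc/out ranges are returned
   unchanged.  */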
3211 void
3212 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3214 rtx addr_reg, eight = GEN_INT (8);
3216 /* Load up our iterator. */
3217 addr_reg = gen_reg_rtx (Pmode);
3218 emit_move_insn (addr_reg, addr);
3220 /* The first two words are the fake descriptor:
3221 __ia64_trampoline, ADDR+16. */
3222 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3223 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3224 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3226 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3227 copy_to_reg (plus_constant (addr, 16)));
3228 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3230 /* The third word is the target descriptor. */
3231 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3232 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3234 /* The fourth word is the static chain. */
3235 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
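/* Resulting trampoline image (summarizing the stores above):
     addr+0:  &__ia64_trampoline  (entry point of the fake descriptor)
     addr+8:  addr+16             (fake descriptor's gp, locating the data below)
     addr+16: fnaddr              (the target descriptor)
     addr+24: static_chain  */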
3238 /* Do any needed setup for a variadic function. CUM has not been updated
3239 for the last named argument which has type TYPE and mode MODE.
3241 We generate the actual spill instructions during prologue generation. */
3243 void
3244 ia64_setup_incoming_varargs (CUMULATIVE_ARGS cum, int int_mode, tree type,
3245 int * pretend_size,
3246 int second_time ATTRIBUTE_UNUSED)
3248 /* Skip the current argument. */
3249 ia64_function_arg_advance (&cum, int_mode, type, 1);
3251 if (cum.words < MAX_ARGUMENT_SLOTS)
3253 int n = MAX_ARGUMENT_SLOTS - cum.words;
3254 *pretend_size = n * UNITS_PER_WORD;
3255 cfun->machine->n_varargs = n;
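/* Illustrative example (assuming MAX_ARGUMENT_SLOTS == 8 and
   UNITS_PER_WORD == 8): for "int f (const char *fmt, ...)" the named
   argument uses one slot, so n == 7, *pretend_size == 56, and the
   prologue will spill the seven remaining argument registers.  */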
3259 /* Check whether TYPE is a homogeneous floating point aggregate. If
3260 it is, return the mode of the floating point type that appears
3261 in all leaves. If it is not, return VOIDmode.
3263 An aggregate is a homogeneous floating point aggregate if all
3264 fields/elements in it have the same floating point type (e.g.,
3265 SFmode). 128-bit quad-precision floats are excluded. */
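/* Illustrative examples: "struct { float x, y, z; }" is an HFA and yields
   SFmode, "struct { double d[4]; }" yields DFmode, while
   "struct { float f; double d; }" mixes element modes and yields
   VOIDmode.  */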
3267 static enum machine_mode
3268 hfa_element_mode (tree type, int nested)
3270 enum machine_mode element_mode = VOIDmode;
3271 enum machine_mode mode;
3272 enum tree_code code = TREE_CODE (type);
3273 int know_element_mode = 0;
3274 tree t;
3276 switch (code)
3278 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3279 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3280 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3281 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
3282 case FUNCTION_TYPE:
3283 return VOIDmode;
3285 /* Fortran complex types are supposed to be HFAs, so we need to handle
3286 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3287 types though. */
3288 case COMPLEX_TYPE:
3289 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3290 && (TYPE_MODE (type) != TCmode || INTEL_EXTENDED_IEEE_FORMAT))
3291 return mode_for_size (GET_MODE_UNIT_SIZE (TYPE_MODE (type))
3292 * BITS_PER_UNIT, MODE_FLOAT, 0);
3293 else
3294 return VOIDmode;
3296 case REAL_TYPE:
3297 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3298 mode if this is contained within an aggregate. */
3299 if (nested && (TYPE_MODE (type) != TFmode || INTEL_EXTENDED_IEEE_FORMAT))
3300 return TYPE_MODE (type);
3301 else
3302 return VOIDmode;
3304 case ARRAY_TYPE:
3305 return hfa_element_mode (TREE_TYPE (type), 1);
3307 case RECORD_TYPE:
3308 case UNION_TYPE:
3309 case QUAL_UNION_TYPE:
3310 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3312 if (TREE_CODE (t) != FIELD_DECL)
3313 continue;
3315 mode = hfa_element_mode (TREE_TYPE (t), 1);
3316 if (know_element_mode)
3318 if (mode != element_mode)
3319 return VOIDmode;
3321 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3322 return VOIDmode;
3323 else
3325 know_element_mode = 1;
3326 element_mode = mode;
3329 return element_mode;
3331 default:
3332 /* If we reach here, we probably have some front-end specific type
3333 that the backend doesn't know about. This can happen via the
3334 aggregate_value_p call in init_function_start. All we can do is
3335 ignore unknown tree types. */
3336 return VOIDmode;
3339 return VOIDmode;
3342 /* Return rtx for register where argument is passed, or zero if it is passed
3343 on the stack. */
3345 /* ??? 128-bit quad-precision floats are always passed in general
3346 registers. */
3348 rtx
3349 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3350 int named, int incoming)
3352 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3353 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3354 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3355 / UNITS_PER_WORD);
3356 int offset = 0;
3357 enum machine_mode hfa_mode = VOIDmode;
3359 /* Integer and float arguments larger than 8 bytes start at the next even
3360 boundary. Aggregates larger than 8 bytes start at the next even boundary
3361 if the aggregate has 16 byte alignment. Net effect is that types with
3362 alignment greater than 8 start at the next even boundary. */
3363 /* ??? The ABI does not specify how to handle aggregates with alignment from
3364 9 to 15 bytes, or greater than 16. We handle them all as if they had
3365 16 byte alignment. Such aggregates can occur only if gcc extensions are
3366 used. */
3367 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3368 : (words > 1))
3369 && (cum->words & 1))
3370 offset = 1;
3372 /* If all argument slots are used, then it must go on the stack. */
3373 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3374 return 0;
3376 /* Check for and handle homogeneous FP aggregates. */
3377 if (type)
3378 hfa_mode = hfa_element_mode (type, 0);
3380 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3381 and unprototyped hfas are passed specially. */
3382 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3384 rtx loc[16];
3385 int i = 0;
3386 int fp_regs = cum->fp_regs;
3387 int int_regs = cum->words + offset;
3388 int hfa_size = GET_MODE_SIZE (hfa_mode);
3389 int byte_size;
3390 int args_byte_size;
3392 /* If prototyped, pass it in FR regs then GR regs.
3393 If not prototyped, pass it in both FR and GR regs.
3395 If this is an SFmode aggregate, then it is possible to run out of
3396 FR regs while GR regs are still left. In that case, we pass the
3397 remaining part in the GR regs. */
3399 /* Fill the FP regs. We do this always. We stop if we reach the end
3400 of the argument, the last FP register, or the last argument slot. */
3402 byte_size = ((mode == BLKmode)
3403 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3404 args_byte_size = int_regs * UNITS_PER_WORD;
3405 offset = 0;
3406 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3407 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3409 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3410 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3411 + fp_regs)),
3412 GEN_INT (offset));
3413 offset += hfa_size;
3414 args_byte_size += hfa_size;
3415 fp_regs++;
3418 /* If no prototype, then the whole thing must go in GR regs. */
3419 if (! cum->prototype)
3420 offset = 0;
3421 /* If this is an SFmode aggregate, then we might have some left over
3422 that needs to go in GR regs. */
3423 else if (byte_size != offset)
3424 int_regs += offset / UNITS_PER_WORD;
3426 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3428 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3430 enum machine_mode gr_mode = DImode;
3432 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3433 then this goes in a GR reg left adjusted/little endian, right
3434 adjusted/big endian. */
3435 /* ??? Currently this is handled wrong, because 4-byte hunks are
3436 always right adjusted/little endian. */
3437 if (offset & 0x4)
3438 gr_mode = SImode;
3439 /* If we have an even 4 byte hunk because the aggregate is a
3440 multiple of 4 bytes in size, then this goes in a GR reg right
3441 adjusted/little endian. */
3442 else if (byte_size - offset == 4)
3443 gr_mode = SImode;
3444 /* Complex floats need to have float mode. */
3445 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3446 gr_mode = hfa_mode;
3448 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3449 gen_rtx_REG (gr_mode, (basereg
3450 + int_regs)),
3451 GEN_INT (offset));
3452 offset += GET_MODE_SIZE (gr_mode);
3453 int_regs += GET_MODE_SIZE (gr_mode) <= UNITS_PER_WORD
3454 ? 1 : GET_MODE_SIZE (gr_mode) / UNITS_PER_WORD;
3457 /* If we ended up using just one location, just return that one loc. */
3458 if (i == 1)
3459 return XEXP (loc[0], 0);
3460 else
3461 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3464 /* Integral and aggregates go in general registers. If we have run out of
3465 FR registers, then FP values must also go in general registers. This can
3466 happen when we have a SFmode HFA. */
3467 else if (((mode == TFmode) && ! INTEL_EXTENDED_IEEE_FORMAT)
3468 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3470 int byte_size = ((mode == BLKmode)
3471 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3472 if (BYTES_BIG_ENDIAN
3473 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3474 && byte_size < UNITS_PER_WORD
3475 && byte_size > 0)
3477 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3478 gen_rtx_REG (DImode,
3479 (basereg + cum->words
3480 + offset)),
3481 const0_rtx);
3482 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3484 else
3485 return gen_rtx_REG (mode, basereg + cum->words + offset);
3489 /* If there is a prototype, then FP values go in a FR register when
3490 named, and in a GR register when unnamed. */
3491 else if (cum->prototype)
3493 if (! named)
3494 return gen_rtx_REG (mode, basereg + cum->words + offset);
3495 else
3496 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3498 /* If there is no prototype, then FP values go in both FR and GR
3499 registers. */
3500 else
3502 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3503 gen_rtx_REG (mode, (FR_ARG_FIRST
3504 + cum->fp_regs)),
3505 const0_rtx);
3506 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3507 gen_rtx_REG (mode,
3508 (basereg + cum->words
3509 + offset)),
3510 const0_rtx);
3512 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3516 /* Return number of words, at the beginning of the argument, that must be
3517 put in registers. 0 if the argument is entirely in registers or entirely
3518 in memory. */
3520 int
3521 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3522 tree type, int named ATTRIBUTE_UNUSED)
3524 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3525 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3526 / UNITS_PER_WORD);
3527 int offset = 0;
3529 /* Arguments with alignment larger than 8 bytes start at the next even
3530 boundary. */
3531 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3532 : (words > 1))
3533 && (cum->words & 1))
3534 offset = 1;
3536 /* If all argument slots are used, then it must go on the stack. */
3537 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3538 return 0;
3540 /* It doesn't matter whether the argument goes in FR or GR regs. If
3541 it fits within the 8 argument slots, then it goes entirely in
3542 registers. If it extends past the last argument slot, then the rest
3543 goes on the stack. */
3545 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3546 return 0;
3548 return MAX_ARGUMENT_SLOTS - cum->words - offset;
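/* Illustrative example (assuming MAX_ARGUMENT_SLOTS == 8): a 4-word
   argument arriving when cum->words == 6 does not fit in the remaining
   slots, so 8 - 6 == 2 words are passed in registers and the rest goes
   on the stack.  */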
3551 /* Update CUM to point after this argument. This is patterned after
3552 ia64_function_arg. */
3554 void
3555 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3556 tree type, int named)
3558 int words = (((mode == BLKmode ? int_size_in_bytes (type)
3559 : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1)
3560 / UNITS_PER_WORD);
3561 int offset = 0;
3562 enum machine_mode hfa_mode = VOIDmode;
3564 /* If all arg slots are already full, then there is nothing to do. */
3565 if (cum->words >= MAX_ARGUMENT_SLOTS)
3566 return;
3568 /* Arguments with alignment larger than 8 bytes start at the next even
3569 boundary. */
3570 if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3571 : (words > 1))
3572 && (cum->words & 1))
3573 offset = 1;
3575 cum->words += words + offset;
3577 /* Check for and handle homogeneous FP aggregates. */
3578 if (type)
3579 hfa_mode = hfa_element_mode (type, 0);
3581 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3582 and unprototyped hfas are passed specially. */
3583 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3585 int fp_regs = cum->fp_regs;
3586 /* This is the original value of cum->words + offset. */
3587 int int_regs = cum->words - words;
3588 int hfa_size = GET_MODE_SIZE (hfa_mode);
3589 int byte_size;
3590 int args_byte_size;
3592 /* If prototyped, pass it in FR regs then GR regs.
3593 If not prototyped, pass it in both FR and GR regs.
3595 If this is an SFmode aggregate, then it is possible to run out of
3596 FR regs while GR regs are still left. In that case, we pass the
3597 remaining part in the GR regs. */
3599 /* Fill the FP regs. We do this always. We stop if we reach the end
3600 of the argument, the last FP register, or the last argument slot. */
3602 byte_size = ((mode == BLKmode)
3603 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3604 args_byte_size = int_regs * UNITS_PER_WORD;
3605 offset = 0;
3606 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3607 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3609 offset += hfa_size;
3610 args_byte_size += hfa_size;
3611 fp_regs++;
3614 cum->fp_regs = fp_regs;
3617 /* Integral and aggregates go in general registers. If we have run out of
3618 FR registers, then FP values must also go in general registers. This can
3619 happen when we have a SFmode HFA. */
3620 else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
3621 cum->int_regs = cum->words;
3623 /* If there is a prototype, then FP values go in a FR register when
3624 named, and in a GR register when unnamed. */
3625 else if (cum->prototype)
3627 if (! named)
3628 cum->int_regs = cum->words;
3629 else
3630 /* ??? Complex types should not reach here. */
3631 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3633 /* If there is no prototype, then FP values go in both FR and GR
3634 registers. */
3635 else
3637 /* ??? Complex types should not reach here. */
3638 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3639 cum->int_regs = cum->words;
3643 /* Variable sized types are passed by reference. */
3644 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3646 int
3647 ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3648 enum machine_mode mode ATTRIBUTE_UNUSED,
3649 tree type, int named ATTRIBUTE_UNUSED)
3651 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3654 /* True if it is OK to do sibling call optimization for the specified
3655 call expression EXP. DECL will be the called function, or NULL if
3656 this is an indirect call. */
3657 static bool
3658 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3660 /* We must always return with our current GP. This means we can
3661 only sibcall to functions defined in the current module. */
3662 return decl && (*targetm.binds_local_p) (decl);
3666 /* Implement va_arg. */
3668 rtx
3669 ia64_va_arg (tree valist, tree type)
3671 tree t;
3673 /* Variable sized types are passed by reference. */
3674 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
3676 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
3677 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
3680 /* Arguments with alignment larger than 8 bytes start at the next even
3681 boundary. */
3682 if (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3684 t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3685 build_int_2 (2 * UNITS_PER_WORD - 1, 0));
3686 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3687 build_int_2 (-2 * UNITS_PER_WORD, -1));
3688 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3689 TREE_SIDE_EFFECTS (t) = 1;
3690 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3693 return std_expand_builtin_va_arg (valist, type);
3696 /* Return 1 if the function return value is returned in memory. Return 0 if it is
3697 in a register. */
3699 int
3700 ia64_return_in_memory (tree valtype)
3702 enum machine_mode mode;
3703 enum machine_mode hfa_mode;
3704 HOST_WIDE_INT byte_size;
3706 mode = TYPE_MODE (valtype);
3707 byte_size = GET_MODE_SIZE (mode);
3708 if (mode == BLKmode)
3710 byte_size = int_size_in_bytes (valtype);
3711 if (byte_size < 0)
3712 return 1;
3715 /* HFAs with up to 8 elements are returned in the FP argument registers. */
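/* An HFA (homogeneous floating-point aggregate) is a struct or array all of
   whose fields share one floating-point mode, e.g. struct { double re, im; }
   or an array of up to eight floats.  */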
3717 hfa_mode = hfa_element_mode (valtype, 0);
3718 if (hfa_mode != VOIDmode)
3720 int hfa_size = GET_MODE_SIZE (hfa_mode);
3722 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3723 return 1;
3724 else
3725 return 0;
3727 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3728 return 1;
3729 else
3730 return 0;
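/* Concretely, assuming UNITS_PER_WORD == 8 and MAX_INT_RETURN_SLOTS == 4,
   a non-HFA aggregate of up to 32 bytes is returned in the GR return
   registers and anything larger in memory, while an HFA may occupy up to
   eight FP argument slots before falling back to memory.  */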
3733 /* Return rtx for register that holds the function return value. */
3736 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3738 enum machine_mode mode;
3739 enum machine_mode hfa_mode;
3741 mode = TYPE_MODE (valtype);
3742 hfa_mode = hfa_element_mode (valtype, 0);
3744 if (hfa_mode != VOIDmode)
3746 rtx loc[8];
3747 int i;
3748 int hfa_size;
3749 int byte_size;
3750 int offset;
3752 hfa_size = GET_MODE_SIZE (hfa_mode);
3753 byte_size = ((mode == BLKmode)
3754 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3755 offset = 0;
3756 for (i = 0; offset < byte_size; i++)
3758 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3759 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3760 GEN_INT (offset));
3761 offset += hfa_size;
3764 if (i == 1)
3765 return XEXP (loc[0], 0);
3766 else
3767 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3769 else if (FLOAT_TYPE_P (valtype) &&
3770 ((mode != TFmode) || INTEL_EXTENDED_IEEE_FORMAT))
3771 return gen_rtx_REG (mode, FR_ARG_FIRST);
3772 else
3774 if (BYTES_BIG_ENDIAN
3775 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3777 rtx loc[8];
3778 int offset;
3779 int bytesize;
3780 int i;
3782 offset = 0;
3783 bytesize = int_size_in_bytes (valtype);
3784 for (i = 0; offset < bytesize; i++)
3786 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3787 gen_rtx_REG (DImode,
3788 GR_RET_FIRST + i),
3789 GEN_INT (offset));
3790 offset += UNITS_PER_WORD;
3792 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3794 else
3795 return gen_rtx_REG (mode, GR_RET_FIRST);
3799 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3800 We need to emit DTP-relative relocations. */
3802 void
3803 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3805 if (size != 8)
3806 abort ();
3807 fputs ("\tdata8.ua\t@dtprel(", file);
3808 output_addr_const (file, x);
3809 fputs (")", file);
3812 /* Print a memory address as an operand to reference that memory location. */
3814 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3815 also call this from ia64_print_operand for memory addresses. */
3817 void
3818 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3819 rtx address ATTRIBUTE_UNUSED)
3823 /* Print an operand to an assembler instruction.
3824 C Swap and print a comparison operator.
3825 D Print an FP comparison operator.
3826 E Print 32 - constant, for SImode shifts as extract.
3827 e Print 64 - constant, for DImode rotates.
3828 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3829 a floating point register emitted normally.
3830 I Invert a predicate register by adding 1.
3831 J Select the proper predicate register for a condition.
3832 j Select the inverse predicate register for a condition.
3833 O Append .acq for volatile load.
3834 P Postincrement of a MEM.
3835 Q Append .rel for volatile store.
3836 S Shift amount for shladd instruction.
3837 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3838 for Intel assembler.
3839 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3840 for Intel assembler.
3841 r Print register name, or constant 0 as r0. HP compatibility for
3842 Linux kernel. */
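/* As an illustration only, a hypothetical output template such as
   "(%J0) br.cond%+ %l1" would print the predicate register selected by 'J'
   and append one of the .sptk/.spnt/.dptk/.dpnt hints via '+'.  */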
3843 void
3844 ia64_print_operand (FILE * file, rtx x, int code)
3846 const char *str;
3848 switch (code)
3850 case 0:
3851 /* Handled below. */
3852 break;
3854 case 'C':
3856 enum rtx_code c = swap_condition (GET_CODE (x));
3857 fputs (GET_RTX_NAME (c), file);
3858 return;
3861 case 'D':
3862 switch (GET_CODE (x))
3864 case NE:
3865 str = "neq";
3866 break;
3867 case UNORDERED:
3868 str = "unord";
3869 break;
3870 case ORDERED:
3871 str = "ord";
3872 break;
3873 default:
3874 str = GET_RTX_NAME (GET_CODE (x));
3875 break;
3877 fputs (str, file);
3878 return;
3880 case 'E':
3881 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3882 return;
3884 case 'e':
3885 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3886 return;
3888 case 'F':
3889 if (x == CONST0_RTX (GET_MODE (x)))
3890 str = reg_names [FR_REG (0)];
3891 else if (x == CONST1_RTX (GET_MODE (x)))
3892 str = reg_names [FR_REG (1)];
3893 else if (GET_CODE (x) == REG)
3894 str = reg_names [REGNO (x)];
3895 else
3896 abort ();
3897 fputs (str, file);
3898 return;
3900 case 'I':
3901 fputs (reg_names [REGNO (x) + 1], file);
3902 return;
3904 case 'J':
3905 case 'j':
3907 unsigned int regno = REGNO (XEXP (x, 0));
3908 if (GET_CODE (x) == EQ)
3909 regno += 1;
3910 if (code == 'j')
3911 regno ^= 1;
3912 fputs (reg_names [regno], file);
3914 return;
3916 case 'O':
3917 if (MEM_VOLATILE_P (x))
3918 fputs(".acq", file);
3919 return;
3921 case 'P':
3923 HOST_WIDE_INT value;
3925 switch (GET_CODE (XEXP (x, 0)))
3927 default:
3928 return;
3930 case POST_MODIFY:
3931 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
3932 if (GET_CODE (x) == CONST_INT)
3933 value = INTVAL (x);
3934 else if (GET_CODE (x) == REG)
3936 fprintf (file, ", %s", reg_names[REGNO (x)]);
3937 return;
3939 else
3940 abort ();
3941 break;
3943 case POST_INC:
3944 value = GET_MODE_SIZE (GET_MODE (x));
3945 break;
3947 case POST_DEC:
3948 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
3949 break;
3952 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
3953 return;
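/* For example, a POST_INC of a DImode memory operand prints ", 8", producing
   post-increment syntax along the lines of "ld8 r2 = [r3], 8" (illustrative).  */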
3956 case 'Q':
3957 if (MEM_VOLATILE_P (x))
3958 fputs(".rel", file);
3959 return;
3961 case 'S':
3962 fprintf (file, "%d", exact_log2 (INTVAL (x)));
3963 return;
3965 case 'T':
3966 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3968 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
3969 return;
3971 break;
3973 case 'U':
3974 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3976 const char *prefix = "0x";
3977 if (INTVAL (x) & 0x80000000)
3979 fprintf (file, "0xffffffff");
3980 prefix = "";
3982 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
3983 return;
3985 break;
3987 case 'r':
3988 /* If this operand is the constant zero, write it as register zero.
3989 Any register, zero, or CONST_INT value is OK here. */
3990 if (GET_CODE (x) == REG)
3991 fputs (reg_names[REGNO (x)], file);
3992 else if (x == CONST0_RTX (GET_MODE (x)))
3993 fputs ("r0", file);
3994 else if (GET_CODE (x) == CONST_INT)
3995 output_addr_const (file, x);
3996 else
3997 output_operand_lossage ("invalid %%r value");
3998 return;
4000 case '+':
4002 const char *which;
4004 /* For conditional branches, returns or calls, substitute
4005 sptk, dptk, dpnt, or spnt for %s. */
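/* The completers encode static vs. dynamic prediction (s/d) and taken
   vs. not taken (tk/nt), following the usual ia64 branch-hint mnemonics.  */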
4006 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4007 if (x)
4009 int pred_val = INTVAL (XEXP (x, 0));
4011 /* Guess top and bottom 2% statically predicted. */
4012 if (pred_val < REG_BR_PROB_BASE / 50)
4013 which = ".spnt";
4014 else if (pred_val < REG_BR_PROB_BASE / 2)
4015 which = ".dpnt";
4016 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4017 which = ".dptk";
4018 else
4019 which = ".sptk";
4021 else if (GET_CODE (current_output_insn) == CALL_INSN)
4022 which = ".sptk";
4023 else
4024 which = ".dptk";
4026 fputs (which, file);
4027 return;
4030 case ',':
4031 x = current_insn_predicate;
4032 if (x)
4034 unsigned int regno = REGNO (XEXP (x, 0));
4035 if (GET_CODE (x) == EQ)
4036 regno += 1;
4037 fprintf (file, "(%s) ", reg_names [regno]);
4039 return;
4041 default:
4042 output_operand_lossage ("ia64_print_operand: unknown code");
4043 return;
4046 switch (GET_CODE (x))
4048 /* This happens for the spill/restore instructions. */
4049 case POST_INC:
4050 case POST_DEC:
4051 case POST_MODIFY:
4052 x = XEXP (x, 0);
4053 /* ... fall through ... */
4055 case REG:
4056 fputs (reg_names [REGNO (x)], file);
4057 break;
4059 case MEM:
4061 rtx addr = XEXP (x, 0);
4062 if (GET_RTX_CLASS (GET_CODE (addr)) == 'a')
4063 addr = XEXP (addr, 0);
4064 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4065 break;
4068 default:
4069 output_addr_const (file, x);
4070 break;
4073 return;
4076 /* Compute a (partial) cost for rtx X. Return true if the complete
4077 cost has been computed, and false if subexpressions should be
4078 scanned. In either case, *TOTAL contains the cost result. */
4079 /* ??? This is incomplete. */
4081 static bool
4082 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4084 switch (code)
4086 case CONST_INT:
4087 switch (outer_code)
4089 case SET:
4090 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4091 return true;
4092 case PLUS:
4093 if (CONST_OK_FOR_I (INTVAL (x)))
4094 *total = 0;
4095 else if (CONST_OK_FOR_J (INTVAL (x)))
4096 *total = 1;
4097 else
4098 *total = COSTS_N_INSNS (1);
4099 return true;
4100 default:
4101 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4102 *total = 0;
4103 else
4104 *total = COSTS_N_INSNS (1);
4105 return true;
4108 case CONST_DOUBLE:
4109 *total = COSTS_N_INSNS (1);
4110 return true;
4112 case CONST:
4113 case SYMBOL_REF:
4114 case LABEL_REF:
4115 *total = COSTS_N_INSNS (3);
4116 return true;
4118 case MULT:
4119 /* For multiplies wider than HImode, we have to go to the FPU,
4120 which normally involves copies. Plus there's the latency
4121 of the multiply itself, and the latency of the instructions to
4122 transfer integer regs to FP regs. */
4123 /* ??? Check for FP mode. */
4124 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4125 *total = COSTS_N_INSNS (10);
4126 else
4127 *total = COSTS_N_INSNS (2);
4128 return true;
4130 case PLUS:
4131 case MINUS:
4132 case ASHIFT:
4133 case ASHIFTRT:
4134 case LSHIFTRT:
4135 *total = COSTS_N_INSNS (1);
4136 return true;
4138 case DIV:
4139 case UDIV:
4140 case MOD:
4141 case UMOD:
4142 /* We make divide expensive, so that divide-by-constant will be
4143 optimized to a multiply. */
4144 *total = COSTS_N_INSNS (60);
4145 return true;
4147 default:
4148 return false;
4152 /* Calculate the cost of moving data from a register in class FROM to
4153 one in class TO, using MODE. */
4156 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4157 enum reg_class to)
4159 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4160 if (to == ADDL_REGS)
4161 to = GR_REGS;
4162 if (from == ADDL_REGS)
4163 from = GR_REGS;
4165 /* All costs are symmetric, so reduce cases by putting the
4166 lower number class as the destination. */
4167 if (from < to)
4169 enum reg_class tmp = to;
4170 to = from, from = tmp;
4173 /* Moving from FR<->GR in TFmode must be more expensive than 2,
4174 so that we get secondary memory reloads. Between FR_REGS,
4175 we have to make this at least as expensive as MEMORY_MOVE_COST
4176 to avoid spectacularly poor register class preferencing. */
4177 if (mode == TFmode)
4179 if (to != GR_REGS || from != GR_REGS)
4180 return MEMORY_MOVE_COST (mode, to, 0);
4181 else
4182 return 3;
4185 switch (to)
4187 case PR_REGS:
4188 /* Moving between PR registers takes two insns. */
4189 if (from == PR_REGS)
4190 return 3;
4191 /* Moving between PR and anything but GR is impossible. */
4192 if (from != GR_REGS)
4193 return MEMORY_MOVE_COST (mode, to, 0);
4194 break;
4196 case BR_REGS:
4197 /* Moving between BR and anything but GR is impossible. */
4198 if (from != GR_REGS && from != GR_AND_BR_REGS)
4199 return MEMORY_MOVE_COST (mode, to, 0);
4200 break;
4202 case AR_I_REGS:
4203 case AR_M_REGS:
4204 /* Moving between AR and anything but GR is impossible. */
4205 if (from != GR_REGS)
4206 return MEMORY_MOVE_COST (mode, to, 0);
4207 break;
4209 case GR_REGS:
4210 case FR_REGS:
4211 case GR_AND_FR_REGS:
4212 case GR_AND_BR_REGS:
4213 case ALL_REGS:
4214 break;
4216 default:
4217 abort ();
4220 return 2;
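/* So, for instance, an ordinary DImode GR<->FR copy costs 2, a PR<->PR copy
   costs 3, and moves between PR/BR/AR and anything other than GR are priced
   at full memory-move cost, since they must bounce through a general
   register.  */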
4223 /* This function returns the register class required for a secondary
4224 register when copying between one of the registers in CLASS, and X,
4225 using MODE. A return value of NO_REGS means that no secondary register
4226 is required. */
4228 enum reg_class
4229 ia64_secondary_reload_class (enum reg_class class,
4230 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4232 int regno = -1;
4234 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4235 regno = true_regnum (x);
4237 switch (class)
4239 case BR_REGS:
4240 case AR_M_REGS:
4241 case AR_I_REGS:
4242 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4243 interaction. We end up with two pseudos with overlapping lifetimes
4244 both of which are equiv to the same constant, and both of which need
4245 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4246 changes depending on the path length, which means the qty_first_reg
4247 check in make_regs_eqv can give different answers at different times.
4248 At some point I'll probably need a reload_indi pattern to handle
4249 this.
4251 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4252 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4253 non-general registers for good measure. */
4254 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4255 return GR_REGS;
4257 /* This is needed if a pseudo used as a call_operand gets spilled to a
4258 stack slot. */
4259 if (GET_CODE (x) == MEM)
4260 return GR_REGS;
4261 break;
4263 case FR_REGS:
4264 /* Need to go through general registers to get to other class regs. */
4265 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4266 return GR_REGS;
4268 /* This can happen when a paradoxical subreg is an operand to the
4269 muldi3 pattern. */
4270 /* ??? This shouldn't be necessary after instruction scheduling is
4271 enabled, because paradoxical subregs are not accepted by
4272 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4273 stop the paradoxical subreg stupidity in the *_operand functions
4274 in recog.c. */
4275 if (GET_CODE (x) == MEM
4276 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4277 || GET_MODE (x) == QImode))
4278 return GR_REGS;
4280 /* This can happen because of the ior/and/etc patterns that accept FP
4281 registers as operands. If the third operand is a constant, then it
4282 needs to be reloaded into a FP register. */
4283 if (GET_CODE (x) == CONST_INT)
4284 return GR_REGS;
4286 /* This can happen because of register elimination in a muldi3 insn.
4287 E.g. `26107 * (unsigned long)&u'. */
4288 if (GET_CODE (x) == PLUS)
4289 return GR_REGS;
4290 break;
4292 case PR_REGS:
4293 /* ??? This happens if we cse/gcse a BImode value across a call,
4294 and the function has a nonlocal goto. This is because global
4295 does not allocate call crossing pseudos to hard registers when
4296 current_function_has_nonlocal_goto is true. This is relatively
4297 common for C++ programs that use exceptions. To reproduce,
4298 return NO_REGS and compile libstdc++. */
4299 if (GET_CODE (x) == MEM)
4300 return GR_REGS;
4302 /* This can happen when we take a BImode subreg of a DImode value,
4303 and that DImode value winds up in some non-GR register. */
4304 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4305 return GR_REGS;
4306 break;
4308 case GR_REGS:
4309 /* Since we have no offsettable memory addresses, we need a temporary
4310 to hold the address of the second word. */
4311 if (mode == TImode)
4312 return GR_REGS;
4313 break;
4315 default:
4316 break;
4319 return NO_REGS;
4323 /* Emit text to declare externally defined variables and functions, because
4324 the Intel assembler does not support undefined externals. */
4326 void
4327 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4329 int save_referenced;
4331 /* GNU as does not need anything here, but the HP linker does need
4332 something for external functions. */
4334 if (TARGET_GNU_AS
4335 && (!TARGET_HPUX_LD
4336 || TREE_CODE (decl) != FUNCTION_DECL
4337 || strstr(name, "__builtin_") == name))
4338 return;
4340 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4341 the linker when we do this, so we need to be careful not to do this for
4342 builtin functions which have no library equivalent. Unfortunately, we
4343 can't tell here whether or not a function will actually be called by
4344 expand_expr, so we pull in library functions even if we may not need
4345 them later. */
4346 if (! strcmp (name, "__builtin_next_arg")
4347 || ! strcmp (name, "alloca")
4348 || ! strcmp (name, "__builtin_constant_p")
4349 || ! strcmp (name, "__builtin_args_info"))
4350 return;
4352 if (TARGET_HPUX_LD)
4353 ia64_hpux_add_extern_decl (name);
4354 else
4356 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4357 restore it. */
4358 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4359 if (TREE_CODE (decl) == FUNCTION_DECL)
4360 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4361 (*targetm.asm_out.globalize_label) (file, name);
4362 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4366 /* Parse the -mfixed-range= option string. */
4368 static void
4369 fix_range (const char *const_str)
4371 int i, first, last;
4372 char *str, *dash, *comma;
4374 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4375 REG2 are either register names or register numbers. The effect
4376 of this option is to mark the registers in the range from REG1 to
4377 REG2 as ``fixed'' so they won't be used by the compiler. This is
4378 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
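/* For example, -mfixed-range=f32-f127 reserves the upper FP registers, and
   multiple ranges may be combined as in -mfixed-range=f12-f15,f32-f127;
   every register named is marked both fixed and call-used.  */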
4380 i = strlen (const_str);
4381 str = (char *) alloca (i + 1);
4382 memcpy (str, const_str, i + 1);
4384 while (1)
4386 dash = strchr (str, '-');
4387 if (!dash)
4389 warning ("value of -mfixed-range must have form REG1-REG2");
4390 return;
4392 *dash = '\0';
4394 comma = strchr (dash + 1, ',');
4395 if (comma)
4396 *comma = '\0';
4398 first = decode_reg_name (str);
4399 if (first < 0)
4401 warning ("unknown register name: %s", str);
4402 return;
4405 last = decode_reg_name (dash + 1);
4406 if (last < 0)
4408 warning ("unknown register name: %s", dash + 1);
4409 return;
4412 *dash = '-';
4414 if (first > last)
4416 warning ("%s-%s is an empty range", str, dash + 1);
4417 return;
4420 for (i = first; i <= last; ++i)
4421 fixed_regs[i] = call_used_regs[i] = 1;
4423 if (!comma)
4424 break;
4426 *comma = ',';
4427 str = comma + 1;
4431 static struct machine_function *
4432 ia64_init_machine_status (void)
4434 return ggc_alloc_cleared (sizeof (struct machine_function));
4437 /* Handle TARGET_OPTIONS switches. */
4439 void
4440 ia64_override_options (void)
4442 static struct pta
4444 const char *const name; /* processor name or nickname. */
4445 const enum processor_type processor;
4447 const processor_alias_table[] =
4449 {"itanium", PROCESSOR_ITANIUM},
4450 {"itanium1", PROCESSOR_ITANIUM},
4451 {"merced", PROCESSOR_ITANIUM},
4452 {"itanium2", PROCESSOR_ITANIUM2},
4453 {"mckinley", PROCESSOR_ITANIUM2},
4456 int const pta_size = ARRAY_SIZE (processor_alias_table);
4457 int i;
4459 if (TARGET_AUTO_PIC)
4460 target_flags |= MASK_CONST_GP;
4462 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4464 warning ("cannot optimize floating point division for both latency and throughput");
4465 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4468 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4470 warning ("cannot optimize integer division for both latency and throughput");
4471 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4474 if (ia64_fixed_range_string)
4475 fix_range (ia64_fixed_range_string);
4477 if (ia64_tls_size_string)
4479 char *end;
4480 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4481 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4482 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4483 else
4484 ia64_tls_size = tmp;
4487 if (!ia64_tune_string)
4488 ia64_tune_string = "itanium2";
4490 for (i = 0; i < pta_size; i++)
4491 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4493 ia64_tune = processor_alias_table[i].processor;
4494 break;
4497 if (i == pta_size)
4498 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4500 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4501 flag_schedule_insns_after_reload = 0;
4503 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4505 init_machine_status = ia64_init_machine_status;
4507 /* Tell the compiler which flavor of TFmode we're using. */
4508 if (INTEL_EXTENDED_IEEE_FORMAT)
4509 real_format_for_mode[TFmode - QFmode] = &ieee_extended_intel_128_format;
4512 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4513 static enum attr_type ia64_safe_type (rtx);
4515 static enum attr_itanium_class
4516 ia64_safe_itanium_class (rtx insn)
4518 if (recog_memoized (insn) >= 0)
4519 return get_attr_itanium_class (insn);
4520 else
4521 return ITANIUM_CLASS_UNKNOWN;
4524 static enum attr_type
4525 ia64_safe_type (rtx insn)
4527 if (recog_memoized (insn) >= 0)
4528 return get_attr_type (insn);
4529 else
4530 return TYPE_UNKNOWN;
4533 /* The following collection of routines emit instruction group stop bits as
4534 necessary to avoid dependencies. */
4536 /* Need to track some additional registers as far as serialization is
4537 concerned so we can properly handle br.call and br.ret. We could
4538 make these registers visible to gcc, but since these registers are
4539 never explicitly used in gcc generated code, it seems wasteful to
4540 do so (plus it would make the call and return patterns needlessly
4541 complex). */
4542 #define REG_GP (GR_REG (1))
4543 #define REG_RP (BR_REG (0))
4544 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4545 /* This is used for volatile asms which may require a stop bit immediately
4546 before and after them. */
4547 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4548 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4549 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4551 /* For each register, we keep track of how it has been written in the
4552 current instruction group.
4554 If a register is written unconditionally (no qualifying predicate),
4555 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4557 If a register is written if its qualifying predicate P is true, we
4558 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4559 may be written again by the complement of P (P^1) and when this happens,
4560 WRITE_COUNT gets set to 2.
4562 The result of this is that whenever an insn attempts to write a register
4563 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4565 If a predicate register is written by a floating-point insn, we set
4566 WRITTEN_BY_FP to true.
4568 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4569 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
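/* A small worked example, assuming p6/p7 form a complementary pair: a
   conditional write such as "(p6) mov r4 = r5" leaves r4 with WRITE_COUNT == 1
   and FIRST_PRED == p6; a following "(p7) mov r4 = r9" is still safe and bumps
   WRITE_COUNT to 2; any further write to r4 in the same group then requires a
   stop bit.  */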
4571 struct reg_write_state
4573 unsigned int write_count : 2;
4574 unsigned int first_pred : 16;
4575 unsigned int written_by_fp : 1;
4576 unsigned int written_by_and : 1;
4577 unsigned int written_by_or : 1;
4580 /* Cumulative info for the current instruction group. */
4581 struct reg_write_state rws_sum[NUM_REGS];
4582 /* Info for the current instruction. This gets copied to rws_sum after a
4583 stop bit is emitted. */
4584 struct reg_write_state rws_insn[NUM_REGS];
4586 /* Indicates whether this is the first instruction after a stop bit,
4587 in which case we don't need another stop bit. Without this, we hit
4588 the abort in ia64_variable_issue when scheduling an alloc. */
4589 static int first_instruction;
4591 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4592 RTL for one instruction. */
4593 struct reg_flags
4595 unsigned int is_write : 1; /* Is register being written? */
4596 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4597 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4598 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4599 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4600 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4603 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4604 static int rws_access_regno (int, struct reg_flags, int);
4605 static int rws_access_reg (rtx, struct reg_flags, int);
4606 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4607 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4608 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4609 static void init_insn_group_barriers (void);
4610 static int group_barrier_needed_p (rtx);
4611 static int safe_group_barrier_needed_p (rtx);
4613 /* Update *RWS for REGNO, which is being written by the current instruction,
4614 with predicate PRED, and associated register flags in FLAGS. */
4616 static void
4617 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4619 if (pred)
4620 rws[regno].write_count++;
4621 else
4622 rws[regno].write_count = 2;
4623 rws[regno].written_by_fp |= flags.is_fp;
4624 /* ??? Not tracking and/or across differing predicates. */
4625 rws[regno].written_by_and = flags.is_and;
4626 rws[regno].written_by_or = flags.is_or;
4627 rws[regno].first_pred = pred;
4630 /* Handle an access to register REGNO of type FLAGS using predicate register
4631 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4632 a dependency with an earlier instruction in the same group. */
4634 static int
4635 rws_access_regno (int regno, struct reg_flags flags, int pred)
4637 int need_barrier = 0;
4639 if (regno >= NUM_REGS)
4640 abort ();
4642 if (! PR_REGNO_P (regno))
4643 flags.is_and = flags.is_or = 0;
4645 if (flags.is_write)
4647 int write_count;
4649 /* One insn writes same reg multiple times? */
4650 if (rws_insn[regno].write_count > 0)
4651 abort ();
4653 /* Update info for current instruction. */
4654 rws_update (rws_insn, regno, flags, pred);
4655 write_count = rws_sum[regno].write_count;
4657 switch (write_count)
4659 case 0:
4660 /* The register has not been written yet. */
4661 rws_update (rws_sum, regno, flags, pred);
4662 break;
4664 case 1:
4665 /* The register has been written via a predicate. If this is
4666 not a complementary predicate, then we need a barrier. */
4667 /* ??? This assumes that P and P+1 are always complementary
4668 predicates for P even. */
4669 if (flags.is_and && rws_sum[regno].written_by_and)
4671 else if (flags.is_or && rws_sum[regno].written_by_or)
4673 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4674 need_barrier = 1;
4675 rws_update (rws_sum, regno, flags, pred);
4676 break;
4678 case 2:
4679 /* The register has been unconditionally written already. We
4680 need a barrier. */
4681 if (flags.is_and && rws_sum[regno].written_by_and)
4683 else if (flags.is_or && rws_sum[regno].written_by_or)
4685 else
4686 need_barrier = 1;
4687 rws_sum[regno].written_by_and = flags.is_and;
4688 rws_sum[regno].written_by_or = flags.is_or;
4689 break;
4691 default:
4692 abort ();
4695 else
4697 if (flags.is_branch)
4699 /* Branches have several RAW exceptions that allow us to avoid
4700 barriers. */
4702 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4703 /* RAW dependencies on branch regs are permissible as long
4704 as the writer is a non-branch instruction. Since we
4705 never generate code that uses a branch register written
4706 by a branch instruction, handling this case is
4707 easy. */
4708 return 0;
4710 if (REGNO_REG_CLASS (regno) == PR_REGS
4711 && ! rws_sum[regno].written_by_fp)
4712 /* The predicates of a branch are available within the
4713 same insn group as long as the predicate was written by
4714 something other than a floating-point instruction. */
4715 return 0;
4718 if (flags.is_and && rws_sum[regno].written_by_and)
4719 return 0;
4720 if (flags.is_or && rws_sum[regno].written_by_or)
4721 return 0;
4723 switch (rws_sum[regno].write_count)
4725 case 0:
4726 /* The register has not been written yet. */
4727 break;
4729 case 1:
4730 /* The register has been written via a predicate. If this is
4731 not a complementary predicate, then we need a barrier. */
4732 /* ??? This assumes that P and P+1 are always complementary
4733 predicates for P even. */
4734 if ((rws_sum[regno].first_pred ^ 1) != pred)
4735 need_barrier = 1;
4736 break;
4738 case 2:
4739 /* The register has been unconditionally written already. We
4740 need a barrier. */
4741 need_barrier = 1;
4742 break;
4744 default:
4745 abort ();
4749 return need_barrier;
4752 static int
4753 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4755 int regno = REGNO (reg);
4756 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4758 if (n == 1)
4759 return rws_access_regno (regno, flags, pred);
4760 else
4762 int need_barrier = 0;
4763 while (--n >= 0)
4764 need_barrier |= rws_access_regno (regno + n, flags, pred);
4765 return need_barrier;
4769 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4770 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4772 static void
4773 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4775 rtx src = SET_SRC (x);
4777 *pcond = 0;
4779 switch (GET_CODE (src))
4781 case CALL:
4782 return;
4784 case IF_THEN_ELSE:
4785 if (SET_DEST (x) == pc_rtx)
4786 /* X is a conditional branch. */
4787 return;
4788 else
4790 int is_complemented = 0;
4792 /* X is a conditional move. */
4793 rtx cond = XEXP (src, 0);
4794 if (GET_CODE (cond) == EQ)
4795 is_complemented = 1;
4796 cond = XEXP (cond, 0);
4797 if (GET_CODE (cond) != REG
4798 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4799 abort ();
4800 *pcond = cond;
4801 if (XEXP (src, 1) == SET_DEST (x)
4802 || XEXP (src, 2) == SET_DEST (x))
4804 /* X is a conditional move that conditionally writes the
4805 destination. */
4807 /* We need another complement in this case. */
4808 if (XEXP (src, 1) == SET_DEST (x))
4809 is_complemented = ! is_complemented;
4811 *ppred = REGNO (cond);
4812 if (is_complemented)
4813 ++*ppred;
4816 /* ??? If this is a conditional write to the dest, then this
4817 instruction does not actually read one source. This probably
4818 doesn't matter, because that source is also the dest. */
4819 /* ??? Multiple writes to predicate registers are allowed
4820 if they are all AND type compares, or if they are all OR
4821 type compares. We do not generate such instructions
4822 currently. */
4824 /* ... fall through ... */
4826 default:
4827 if (GET_RTX_CLASS (GET_CODE (src)) == '<'
4828 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
4829 /* Set pflags->is_fp to 1 so that we know we're dealing
4830 with a floating point comparison when processing the
4831 destination of the SET. */
4832 pflags->is_fp = 1;
4834 /* Discover if this is a parallel comparison. We only handle
4835 and.orcm and or.andcm at present, since we must retain a
4836 strict inverse on the predicate pair. */
4837 else if (GET_CODE (src) == AND)
4838 pflags->is_and = 1;
4839 else if (GET_CODE (src) == IOR)
4840 pflags->is_or = 1;
4842 break;
4846 /* Subroutine of rtx_needs_barrier; this function determines whether the
4847 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
4848 are as in rtx_needs_barrier. COND is an rtx that holds the condition
4849 for this insn. */
4851 static int
4852 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
4854 int need_barrier = 0;
4855 rtx dst;
4856 rtx src = SET_SRC (x);
4858 if (GET_CODE (src) == CALL)
4859 /* We don't need to worry about the result registers that
4860 get written by a subroutine call. */
4861 return rtx_needs_barrier (src, flags, pred);
4862 else if (SET_DEST (x) == pc_rtx)
4864 /* X is a conditional branch. */
4865 /* ??? This seems redundant, as the caller sets this bit for
4866 all JUMP_INSNs. */
4867 flags.is_branch = 1;
4868 return rtx_needs_barrier (src, flags, pred);
4871 need_barrier = rtx_needs_barrier (src, flags, pred);
4873 /* This instruction unconditionally uses a predicate register. */
4874 if (cond)
4875 need_barrier |= rws_access_reg (cond, flags, 0);
4877 dst = SET_DEST (x);
4878 if (GET_CODE (dst) == ZERO_EXTRACT)
4880 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
4881 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
4882 dst = XEXP (dst, 0);
4884 return need_barrier;
4887 /* Handle an access to rtx X of type FLAGS using predicate register PRED.
4888 Return 1 if this access creates a dependency with an earlier instruction
4889 in the same group. */
4891 static int
4892 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
4894 int i, j;
4895 int is_complemented = 0;
4896 int need_barrier = 0;
4897 const char *format_ptr;
4898 struct reg_flags new_flags;
4899 rtx cond = 0;
4901 if (! x)
4902 return 0;
4904 new_flags = flags;
4906 switch (GET_CODE (x))
4908 case SET:
4909 update_set_flags (x, &new_flags, &pred, &cond);
4910 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
4911 if (GET_CODE (SET_SRC (x)) != CALL)
4913 new_flags.is_write = 1;
4914 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
4916 break;
4918 case CALL:
4919 new_flags.is_write = 0;
4920 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4922 /* Avoid multiple register writes, in case this is a pattern with
4923 multiple CALL rtx. This avoids an abort in rws_access_reg. */
4924 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
4926 new_flags.is_write = 1;
4927 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
4928 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
4929 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4931 break;
4933 case COND_EXEC:
4934 /* X is a predicated instruction. */
4936 cond = COND_EXEC_TEST (x);
4937 if (pred)
4938 abort ();
4939 need_barrier = rtx_needs_barrier (cond, flags, 0);
4941 if (GET_CODE (cond) == EQ)
4942 is_complemented = 1;
4943 cond = XEXP (cond, 0);
4944 if (GET_CODE (cond) != REG
4945 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4946 abort ();
4947 pred = REGNO (cond);
4948 if (is_complemented)
4949 ++pred;
4951 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
4952 return need_barrier;
4954 case CLOBBER:
4955 case USE:
4956 /* Clobber & use are for earlier compiler-phases only. */
4957 break;
4959 case ASM_OPERANDS:
4960 case ASM_INPUT:
4961 /* We always emit stop bits for traditional asms. We emit stop bits
4962 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
4963 if (GET_CODE (x) != ASM_OPERANDS
4964 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
4966 /* Avoid writing the register multiple times if we have multiple
4967 asm outputs. This avoids an abort in rws_access_reg. */
4968 if (! rws_insn[REG_VOLATILE].write_count)
4970 new_flags.is_write = 1;
4971 rws_access_regno (REG_VOLATILE, new_flags, pred);
4973 return 1;
4976 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
4977 We cannot just fall through here, since then we would be confused
4978 by the ASM_INPUT rtxs inside ASM_OPERANDS, which, unlike their normal
4979 usage, do not indicate traditional asms. */
4981 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
4982 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
4983 need_barrier = 1;
4984 break;
4986 case PARALLEL:
4987 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4989 rtx pat = XVECEXP (x, 0, i);
4990 if (GET_CODE (pat) == SET)
4992 update_set_flags (pat, &new_flags, &pred, &cond);
4993 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
4995 else if (GET_CODE (pat) == USE
4996 || GET_CODE (pat) == CALL
4997 || GET_CODE (pat) == ASM_OPERANDS)
4998 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4999 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
5000 abort ();
5002 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5004 rtx pat = XVECEXP (x, 0, i);
5005 if (GET_CODE (pat) == SET)
5007 if (GET_CODE (SET_SRC (pat)) != CALL)
5009 new_flags.is_write = 1;
5010 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5011 pred);
5014 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5015 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5017 break;
5019 case SUBREG:
5020 x = SUBREG_REG (x);
5021 /* FALLTHRU */
5022 case REG:
5023 if (REGNO (x) == AR_UNAT_REGNUM)
5025 for (i = 0; i < 64; ++i)
5026 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5028 else
5029 need_barrier = rws_access_reg (x, flags, pred);
5030 break;
5032 case MEM:
5033 /* Find the regs used in memory address computation. */
5034 new_flags.is_write = 0;
5035 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5036 break;
5038 case CONST_INT: case CONST_DOUBLE:
5039 case SYMBOL_REF: case LABEL_REF: case CONST:
5040 break;
5042 /* Operators with side-effects. */
5043 case POST_INC: case POST_DEC:
5044 if (GET_CODE (XEXP (x, 0)) != REG)
5045 abort ();
5047 new_flags.is_write = 0;
5048 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5049 new_flags.is_write = 1;
5050 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5051 break;
5053 case POST_MODIFY:
5054 if (GET_CODE (XEXP (x, 0)) != REG)
5055 abort ();
5057 new_flags.is_write = 0;
5058 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5059 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5060 new_flags.is_write = 1;
5061 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5062 break;
5064 /* Handle common unary and binary ops for efficiency. */
5065 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5066 case MOD: case UDIV: case UMOD: case AND: case IOR:
5067 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5068 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5069 case NE: case EQ: case GE: case GT: case LE:
5070 case LT: case GEU: case GTU: case LEU: case LTU:
5071 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5072 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5073 break;
5075 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5076 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5077 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5078 case SQRT: case FFS: case POPCOUNT:
5079 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5080 break;
5082 case UNSPEC:
5083 switch (XINT (x, 1))
5085 case UNSPEC_LTOFF_DTPMOD:
5086 case UNSPEC_LTOFF_DTPREL:
5087 case UNSPEC_DTPREL:
5088 case UNSPEC_LTOFF_TPREL:
5089 case UNSPEC_TPREL:
5090 case UNSPEC_PRED_REL_MUTEX:
5091 case UNSPEC_PIC_CALL:
5092 case UNSPEC_MF:
5093 case UNSPEC_FETCHADD_ACQ:
5094 case UNSPEC_BSP_VALUE:
5095 case UNSPEC_FLUSHRS:
5096 case UNSPEC_BUNDLE_SELECTOR:
5097 break;
5099 case UNSPEC_GR_SPILL:
5100 case UNSPEC_GR_RESTORE:
5102 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5103 HOST_WIDE_INT bit = (offset >> 3) & 63;
5105 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5106 new_flags.is_write = (XINT (x, 1) == 1);
5107 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5108 new_flags, pred);
5109 break;
5112 case UNSPEC_FR_SPILL:
5113 case UNSPEC_FR_RESTORE:
5114 case UNSPEC_GETF_EXP:
5115 case UNSPEC_ADDP4:
5116 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5117 break;
5119 case UNSPEC_FR_RECIP_APPROX:
5120 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5121 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5122 break;
5124 case UNSPEC_CMPXCHG_ACQ:
5125 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5126 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5127 break;
5129 default:
5130 abort ();
5132 break;
5134 case UNSPEC_VOLATILE:
5135 switch (XINT (x, 1))
5137 case UNSPECV_ALLOC:
5138 /* Alloc must always be the first instruction of a group.
5139 We force this by always returning true. */
5140 /* ??? We might get better scheduling if we explicitly check for
5141 input/local/output register dependencies, and modify the
5142 scheduler so that alloc is always reordered to the start of
5143 the current group. We could then eliminate all of the
5144 first_instruction code. */
5145 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5147 new_flags.is_write = 1;
5148 rws_access_regno (REG_AR_CFM, new_flags, pred);
5149 return 1;
5151 case UNSPECV_SET_BSP:
5152 need_barrier = 1;
5153 break;
5155 case UNSPECV_BLOCKAGE:
5156 case UNSPECV_INSN_GROUP_BARRIER:
5157 case UNSPECV_BREAK:
5158 case UNSPECV_PSAC_ALL:
5159 case UNSPECV_PSAC_NORMAL:
5160 return 0;
5162 default:
5163 abort ();
5165 break;
5167 case RETURN:
5168 new_flags.is_write = 0;
5169 need_barrier = rws_access_regno (REG_RP, flags, pred);
5170 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5172 new_flags.is_write = 1;
5173 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5174 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5175 break;
5177 default:
5178 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5179 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5180 switch (format_ptr[i])
5182 case '0': /* unused field */
5183 case 'i': /* integer */
5184 case 'n': /* note */
5185 case 'w': /* wide integer */
5186 case 's': /* pointer to string */
5187 case 'S': /* optional pointer to string */
5188 break;
5190 case 'e':
5191 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5192 need_barrier = 1;
5193 break;
5195 case 'E':
5196 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5197 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5198 need_barrier = 1;
5199 break;
5201 default:
5202 abort ();
5204 break;
5206 return need_barrier;
5209 /* Clear out the state for group_barrier_needed_p at the start of a
5210 sequence of insns. */
5212 static void
5213 init_insn_group_barriers (void)
5215 memset (rws_sum, 0, sizeof (rws_sum));
5216 first_instruction = 1;
5219 /* Given the current state, recorded by previous calls to this function,
5220 determine whether a group barrier (a stop bit) is necessary before INSN.
5221 Return nonzero if so. */
5223 static int
5224 group_barrier_needed_p (rtx insn)
5226 rtx pat;
5227 int need_barrier = 0;
5228 struct reg_flags flags;
5230 memset (&flags, 0, sizeof (flags));
5231 switch (GET_CODE (insn))
5233 case NOTE:
5234 break;
5236 case BARRIER:
5237 /* A barrier doesn't imply an instruction group boundary. */
5238 break;
5240 case CODE_LABEL:
5241 memset (rws_insn, 0, sizeof (rws_insn));
5242 return 1;
5244 case CALL_INSN:
5245 flags.is_branch = 1;
5246 flags.is_sibcall = SIBLING_CALL_P (insn);
5247 memset (rws_insn, 0, sizeof (rws_insn));
5249 /* Don't bundle a call following another call. */
5250 if ((pat = prev_active_insn (insn))
5251 && GET_CODE (pat) == CALL_INSN)
5253 need_barrier = 1;
5254 break;
5257 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5258 break;
5260 case JUMP_INSN:
5261 flags.is_branch = 1;
5263 /* Don't bundle a jump following a call. */
5264 if ((pat = prev_active_insn (insn))
5265 && GET_CODE (pat) == CALL_INSN)
5267 need_barrier = 1;
5268 break;
5270 /* FALLTHRU */
5272 case INSN:
5273 if (GET_CODE (PATTERN (insn)) == USE
5274 || GET_CODE (PATTERN (insn)) == CLOBBER)
5275 /* Don't care about USE and CLOBBER "insns"---those are used to
5276 indicate to the optimizer that it shouldn't get rid of
5277 certain operations. */
5278 break;
5280 pat = PATTERN (insn);
5282 /* Ug. Hack hacks hacked elsewhere. */
5283 switch (recog_memoized (insn))
5285 /* We play dependency tricks with the epilogue in order
5286 to get proper schedules. Undo this for dv analysis. */
5287 case CODE_FOR_epilogue_deallocate_stack:
5288 case CODE_FOR_prologue_allocate_stack:
5289 pat = XVECEXP (pat, 0, 0);
5290 break;
5292 /* The pattern we use for br.cloop confuses the code above.
5293 The second element of the vector is representative. */
5294 case CODE_FOR_doloop_end_internal:
5295 pat = XVECEXP (pat, 0, 1);
5296 break;
5298 /* Doesn't generate code. */
5299 case CODE_FOR_pred_rel_mutex:
5300 case CODE_FOR_prologue_use:
5301 return 0;
5303 default:
5304 break;
5307 memset (rws_insn, 0, sizeof (rws_insn));
5308 need_barrier = rtx_needs_barrier (pat, flags, 0);
5310 /* Check to see if the previous instruction was a volatile
5311 asm. */
5312 if (! need_barrier)
5313 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5314 break;
5316 default:
5317 abort ();
5320 if (first_instruction && INSN_P (insn)
5321 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5322 && GET_CODE (PATTERN (insn)) != USE
5323 && GET_CODE (PATTERN (insn)) != CLOBBER)
5325 need_barrier = 0;
5326 first_instruction = 0;
5329 return need_barrier;
5332 /* Like group_barrier_needed_p, but do not clobber the current state. */
5334 static int
5335 safe_group_barrier_needed_p (rtx insn)
5337 struct reg_write_state rws_saved[NUM_REGS];
5338 int saved_first_instruction;
5339 int t;
5341 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5342 saved_first_instruction = first_instruction;
5344 t = group_barrier_needed_p (insn);
5346 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5347 first_instruction = saved_first_instruction;
5349 return t;
5352 /* Scan the current function and insert stop bits as necessary to
5353 eliminate dependencies. This function assumes that a final
5354 instruction scheduling pass has been run which has already
5355 inserted most of the necessary stop bits. This function only
5356 inserts new ones at basic block boundaries, since these are
5357 invisible to the scheduler. */
5359 static void
5360 emit_insn_group_barriers (FILE *dump)
5362 rtx insn;
5363 rtx last_label = 0;
5364 int insns_since_last_label = 0;
5366 init_insn_group_barriers ();
5368 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5370 if (GET_CODE (insn) == CODE_LABEL)
5372 if (insns_since_last_label)
5373 last_label = insn;
5374 insns_since_last_label = 0;
5376 else if (GET_CODE (insn) == NOTE
5377 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5379 if (insns_since_last_label)
5380 last_label = insn;
5381 insns_since_last_label = 0;
5383 else if (GET_CODE (insn) == INSN
5384 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5385 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5387 init_insn_group_barriers ();
5388 last_label = 0;
5390 else if (INSN_P (insn))
5392 insns_since_last_label = 1;
5394 if (group_barrier_needed_p (insn))
5396 if (last_label)
5398 if (dump)
5399 fprintf (dump, "Emitting stop before label %d\n",
5400 INSN_UID (last_label));
5401 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5402 insn = last_label;
5404 init_insn_group_barriers ();
5405 last_label = 0;
5412 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5413 This function has to emit all necessary group barriers. */
5415 static void
5416 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5418 rtx insn;
5420 init_insn_group_barriers ();
5422 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5424 if (GET_CODE (insn) == BARRIER)
5426 rtx last = prev_active_insn (insn);
5428 if (! last)
5429 continue;
5430 if (GET_CODE (last) == JUMP_INSN
5431 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5432 last = prev_active_insn (last);
5433 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5434 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5436 init_insn_group_barriers ();
5438 else if (INSN_P (insn))
5440 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5441 init_insn_group_barriers ();
5442 else if (group_barrier_needed_p (insn))
5444 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5445 init_insn_group_barriers ();
5446 group_barrier_needed_p (insn);
5453 static int errata_find_address_regs (rtx *, void *);
5454 static void errata_emit_nops (rtx);
5455 static void fixup_errata (void);
5457 /* This structure is used to track some details about the previous insn
5458 groups so we can determine whether it may be necessary to insert NOPs to
5459 work around hardware errata. */
5460 static struct group
5462 HARD_REG_SET p_reg_set;
5463 HARD_REG_SET gr_reg_conditionally_set;
5464 } last_group[2];
5466 /* Index into the last_group array. */
5467 static int group_idx;
5469 /* Called through for_each_rtx; determines if a hard register that was
5470 conditionally set in the previous group is used as an address register.
5471 It ensures that for_each_rtx returns 1 in that case. */
5472 static int
5473 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5475 rtx x = *xp;
5476 if (GET_CODE (x) != MEM)
5477 return 0;
5478 x = XEXP (x, 0);
5479 if (GET_CODE (x) == POST_MODIFY)
5480 x = XEXP (x, 0);
5481 if (GET_CODE (x) == REG)
5483 struct group *prev_group = last_group + (group_idx ^ 1);
5484 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5485 REGNO (x)))
5486 return 1;
5487 return -1;
5489 return 0;
5492 /* Called for each insn; this function keeps track of the state in
5493 last_group and emits additional NOPs if necessary to work around
5494 an Itanium A/B step erratum. */
5495 static void
5496 errata_emit_nops (rtx insn)
5498 struct group *this_group = last_group + group_idx;
5499 struct group *prev_group = last_group + (group_idx ^ 1);
5500 rtx pat = PATTERN (insn);
5501 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5502 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5503 enum attr_type type;
5504 rtx set = real_pat;
5506 if (GET_CODE (real_pat) == USE
5507 || GET_CODE (real_pat) == CLOBBER
5508 || GET_CODE (real_pat) == ASM_INPUT
5509 || GET_CODE (real_pat) == ADDR_VEC
5510 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5511 || asm_noperands (PATTERN (insn)) >= 0)
5512 return;
5514 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5515 parts of it. */
5517 if (GET_CODE (set) == PARALLEL)
5519 int i;
5520 set = XVECEXP (real_pat, 0, 0);
5521 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5522 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5523 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5525 set = 0;
5526 break;
5530 if (set && GET_CODE (set) != SET)
5531 set = 0;
5533 type = get_attr_type (insn);
5535 if (type == TYPE_F
5536 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5537 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5539 if ((type == TYPE_M || type == TYPE_A) && cond && set
5540 && REG_P (SET_DEST (set))
5541 && GET_CODE (SET_SRC (set)) != PLUS
5542 && GET_CODE (SET_SRC (set)) != MINUS
5543 && (GET_CODE (SET_SRC (set)) != ASHIFT
5544 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5545 && (GET_CODE (SET_SRC (set)) != MEM
5546 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5547 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5549 if (GET_RTX_CLASS (GET_CODE (cond)) != '<'
5550 || ! REG_P (XEXP (cond, 0)))
5551 abort ();
5553 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5554 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5556 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5558 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5559 emit_insn_before (gen_nop (), insn);
5560 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5561 group_idx = 0;
5562 memset (last_group, 0, sizeof last_group);
5566 /* Emit extra nops if they are required to work around hardware errata. */
5568 static void
5569 fixup_errata (void)
5571 rtx insn;
5573 if (! TARGET_B_STEP)
5574 return;
5576 group_idx = 0;
5577 memset (last_group, 0, sizeof last_group);
5579 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5581 if (!INSN_P (insn))
5582 continue;
5584 if (ia64_safe_type (insn) == TYPE_S)
5586 group_idx ^= 1;
5587 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5589 else
5590 errata_emit_nops (insn);
5595 /* Instruction scheduling support. */
5597 #define NR_BUNDLES 10
5599 /* A list of names of all available bundles. */
5601 static const char *bundle_name [NR_BUNDLES] =
5603 ".mii",
5604 ".mmi",
5605 ".mfi",
5606 ".mmf",
5607 #if NR_BUNDLES == 10
5608 ".bbb",
5609 ".mbb",
5610 #endif
5611 ".mib",
5612 ".mmb",
5613 ".mfb",
5614 ".mlx"
5617 /* Nonzero if we should insert stop bits into the schedule. */
5619 int ia64_final_schedule = 0;
5621 /* Codes of the corresponding queried units: */
5623 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5624 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5626 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5627 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5629 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5631 /* The following variable value is an insn group barrier. */
5633 static rtx dfa_stop_insn;
5635 /* The following variable value is the last issued insn. */
5637 static rtx last_scheduled_insn;
5639 /* The following variable value is the size of the DFA state. */
5641 static size_t dfa_state_size;
5643 /* The following variable value is a pointer to a DFA state used as a
5644 temporary variable. */
5646 static state_t temp_dfa_state = NULL;
5648 /* The following variable value is the DFA state after issuing the last
5649 insn. */
5651 static state_t prev_cycle_state = NULL;
5653 /* The following array element values are TRUE if the corresponding
5654 insn requires stop bits to be added before it. */
5656 static char *stops_p;
5658 /* The following variable is used to set up the array mentioned above. */
5660 static int stop_before_p = 0;
5662 /* The following variable value is the length of the arrays `clocks' and
5663 `add_cycles'. */
5665 static int clocks_length;
5667 /* The following array element values are the cycles on which the
5668 corresponding insn will be issued. The array is used only for
5669 Itanium1. */
5671 static int *clocks;
5673 /* The following array element values are the numbers of cycles that should
5674 be added to improve insn scheduling for MM_insns on Itanium1. */
5676 static int *add_cycles;
5678 static rtx ia64_single_set (rtx);
5679 static void ia64_emit_insn_before (rtx, rtx);
5681 /* Map a bundle number to its pseudo-op. */
5683 const char *
5684 get_bundle_name (int b)
5686 return bundle_name[b];
5690 /* Return the maximum number of instructions a cpu can issue. */
5692 static int
5693 ia64_issue_rate (void)
5695 return 6;
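/* Both Itanium and Itanium 2 can issue up to two bundles, i.e. six
   instructions, per clock cycle.  */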
5698 /* Helper function - like single_set, but look inside COND_EXEC. */
5700 static rtx
5701 ia64_single_set (rtx insn)
5703 rtx x = PATTERN (insn), ret;
5704 if (GET_CODE (x) == COND_EXEC)
5705 x = COND_EXEC_CODE (x);
5706 if (GET_CODE (x) == SET)
5707 return x;
5709 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
5710 Although they are not classical single sets, the second set is there just
5711 to protect it from moving past FP-relative stack accesses. */
5712 switch (recog_memoized (insn))
5714 case CODE_FOR_prologue_allocate_stack:
5715 case CODE_FOR_epilogue_deallocate_stack:
5716 ret = XVECEXP (x, 0, 0);
5717 break;
5719 default:
5720 ret = single_set_2 (insn, x);
5721 break;
5724 return ret;
5727 /* Adjust the cost of a scheduling dependency. Return the new cost of
5728 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5730 static int
5731 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5733 enum attr_itanium_class dep_class;
5734 enum attr_itanium_class insn_class;
5736 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5737 return cost;
5739 insn_class = ia64_safe_itanium_class (insn);
5740 dep_class = ia64_safe_itanium_class (dep_insn);
5741 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5742 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5743 return 0;
5745 return cost;
5748 /* Like emit_insn_before, but skip cycle_display notes.
5749 ??? When cycle display notes are implemented, update this. */
5751 static void
5752 ia64_emit_insn_before (rtx insn, rtx before)
5754 emit_insn_before (insn, before);
5757 /* The following function marks insns that produce addresses for load
5758 and store insns. Such insns will be placed into M slots because this
5759 decreases latency time for Itanium1 (see function
5760 `ia64_produce_address_p' and the DFA descriptions). */
5762 static void
5763 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5765 rtx insn, link, next, next_tail;
5767 next_tail = NEXT_INSN (tail);
5768 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5769 if (INSN_P (insn))
5770 insn->call = 0;
5771 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5772 if (INSN_P (insn)
5773 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5775 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5777 next = XEXP (link, 0);
5778 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5779 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5780 && ia64_st_address_bypass_p (insn, next))
5781 break;
5782 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5783 || ia64_safe_itanium_class (next)
5784 == ITANIUM_CLASS_FLD)
5785 && ia64_ld_address_bypass_p (insn, next))
5786 break;
5788 insn->call = link != 0;
5792 /* We're beginning a new block. Initialize data structures as necessary. */
5794 static void
5795 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5796 int sched_verbose ATTRIBUTE_UNUSED,
5797 int max_ready ATTRIBUTE_UNUSED)
5799 #ifdef ENABLE_CHECKING
5800 rtx insn;
5802 if (reload_completed)
5803 for (insn = NEXT_INSN (current_sched_info->prev_head);
5804 insn != current_sched_info->next_tail;
5805 insn = NEXT_INSN (insn))
5806 if (SCHED_GROUP_P (insn))
5807 abort ();
5808 #endif
5809 last_scheduled_insn = NULL_RTX;
5810 init_insn_group_barriers ();
5813 /* We are about to begin issuing insns for this clock cycle.
5814 Override the default sort algorithm to better slot instructions. */
5816 static int
5817 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5818 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5819 int reorder_type)
5821 int n_asms;
5822 int n_ready = *pn_ready;
5823 rtx *e_ready = ready + n_ready;
5824 rtx *insnp;
5826 if (sched_verbose)
5827 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5829 if (reorder_type == 0)
5831 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5832 n_asms = 0;
5833 for (insnp = ready; insnp < e_ready; insnp++)
5834 if (insnp < e_ready)
5836 rtx insn = *insnp;
5837 enum attr_type t = ia64_safe_type (insn);
5838 if (t == TYPE_UNKNOWN)
5840 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5841 || asm_noperands (PATTERN (insn)) >= 0)
5843 rtx lowest = ready[n_asms];
5844 ready[n_asms] = insn;
5845 *insnp = lowest;
5846 n_asms++;
5848 else
5850 rtx highest = ready[n_ready - 1];
5851 ready[n_ready - 1] = insn;
5852 *insnp = highest;
5853 return 1;
5858 if (n_asms < n_ready)
5860 /* Some normal insns to process. Skip the asms. */
5861 ready += n_asms;
5862 n_ready -= n_asms;
5864 else if (n_ready > 0)
5865 return 1;
5868 if (ia64_final_schedule)
5870 int deleted = 0;
5871 int nr_need_stop = 0;
5873 for (insnp = ready; insnp < e_ready; insnp++)
5874 if (safe_group_barrier_needed_p (*insnp))
5875 nr_need_stop++;
5877 if (reorder_type == 1 && n_ready == nr_need_stop)
5878 return 0;
5879 if (reorder_type == 0)
5880 return 1;
5881 insnp = e_ready;
5882 /* Move down everything that needs a stop bit, preserving
5883 relative order. */
5884 while (insnp-- > ready + deleted)
5885 while (insnp >= ready + deleted)
5887 rtx insn = *insnp;
5888 if (! safe_group_barrier_needed_p (insn))
5889 break;
5890 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5891 *ready = insn;
5892 deleted++;
5894 n_ready -= deleted;
5895 ready += deleted;
5898 return 1;
5901 /* We are about to begin issuing insns for this clock cycle. Override
5902 the default sort algorithm to better slot instructions. */
5904 static int
5905 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
5906 int clock_var)
5908 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
5909 pn_ready, clock_var, 0);
5912 /* Like ia64_sched_reorder, but called after issuing each insn.
5913 Override the default sort algorithm to better slot instructions. */
5915 static int
5916 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
5917 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
5918 int *pn_ready, int clock_var)
5920 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
5921 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
5922 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
5923 clock_var, 1);
5926 /* We are about to issue INSN. Return the number of insns left on the
5927 ready queue that can be issued this cycle. */
5929 static int
5930 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
5931 int sched_verbose ATTRIBUTE_UNUSED,
5932 rtx insn ATTRIBUTE_UNUSED,
5933 int can_issue_more ATTRIBUTE_UNUSED)
5935 last_scheduled_insn = insn;
5936 memcpy (prev_cycle_state, curr_state, dfa_state_size);
5937 if (reload_completed)
5939 if (group_barrier_needed_p (insn))
5940 abort ();
5941 if (GET_CODE (insn) == CALL_INSN)
5942 init_insn_group_barriers ();
5943 stops_p [INSN_UID (insn)] = stop_before_p;
5944 stop_before_p = 0;
5946 return 1;
5949 /* We are choosing an insn from the ready queue. Return nonzero if INSN
5950 can be chosen. */
5952 static int
5953 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
5955 if (insn == NULL_RTX || !INSN_P (insn))
5956 abort ();
5957 return (!reload_completed
5958 || !safe_group_barrier_needed_p (insn));
5961 /* The following variable value is a pseudo-insn used by the DFA insn
5962 scheduler to change the DFA state when the simulated clock is
5963 increased. */
5965 static rtx dfa_pre_cycle_insn;
5967 /* We are about to begin issuing INSN. Return nonzero if we cannot
5968 issue it on the given cycle CLOCK; set *SORT_P to zero if we should
5969 not sort the ready queue on the next clock start. */
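/* In outline: when a stop bit is needed before INSN, the insn is
   rejected for the current clock (return 1) if it would issue on the
   same cycle as the last insn; otherwise the DFA state is rebuilt from
   `prev_cycle_state' by issuing the stop and pre-cycle pseudo-insns.
   After reload on Itanium1, `add_cycles' is also filled in for insns
   that depend on an MM multiply/shift issued fewer than 4 cycles
   earlier.  */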
5971 static int
5972 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
5973 int clock, int *sort_p)
5975 int setup_clocks_p = FALSE;
5977 if (insn == NULL_RTX || !INSN_P (insn))
5978 abort ();
5979 if ((reload_completed && safe_group_barrier_needed_p (insn))
5980 || (last_scheduled_insn
5981 && (GET_CODE (last_scheduled_insn) == CALL_INSN
5982 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5983 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
5985 init_insn_group_barriers ();
5986 if (verbose && dump)
5987 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
5988 last_clock == clock ? " + cycle advance" : "");
5989 stop_before_p = 1;
5990 if (last_clock == clock)
5992 state_transition (curr_state, dfa_stop_insn);
5993 if (TARGET_EARLY_STOP_BITS)
5994 *sort_p = (last_scheduled_insn == NULL_RTX
5995 || GET_CODE (last_scheduled_insn) != CALL_INSN);
5996 else
5997 *sort_p = 0;
5998 return 1;
6000 else if (reload_completed)
6001 setup_clocks_p = TRUE;
6002 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6003 state_transition (curr_state, dfa_stop_insn);
6004 state_transition (curr_state, dfa_pre_cycle_insn);
6005 state_transition (curr_state, NULL);
6007 else if (reload_completed)
6008 setup_clocks_p = TRUE;
6009 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM)
6011 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6013 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6015 rtx link;
6016 int d = -1;
6018 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6019 if (REG_NOTE_KIND (link) == 0)
6021 enum attr_itanium_class dep_class;
6022 rtx dep_insn = XEXP (link, 0);
6024 dep_class = ia64_safe_itanium_class (dep_insn);
6025 if ((dep_class == ITANIUM_CLASS_MMMUL
6026 || dep_class == ITANIUM_CLASS_MMSHF)
6027 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6028 && (d < 0
6029 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6030 d = last_clock - clocks [INSN_UID (dep_insn)];
6032 if (d >= 0)
6033 add_cycles [INSN_UID (insn)] = 3 - d;
6036 return 0;
6041 /* The following page contains abstract data `bundle states' which are
6042 used for bundling insns (inserting nops and template generation). */
6044 /* The following describes state of insn bundling. */
6046 struct bundle_state
6048 /* Unique bundle state number to identify them in the debugging
6049 output */
6050 int unique_num;
6051 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6052 /* number of nops before and after the insn */
6053 short before_nops_num, after_nops_num;
6054 int insn_num; /* insn number (0 for the initial state, 1 for the 1st
6055 insn) */
6056 int cost; /* cost of the state in cycles */
6057 int accumulated_insns_num; /* number of all previous insns including
6058 nops; an L-type insn is counted as 2 insns */
6059 int branch_deviation; /* deviation of previous branches from 3rd slots */
6060 struct bundle_state *next; /* next state with the same insn_num */
6061 struct bundle_state *originator; /* originator (previous insn state) */
6062 /* All bundle states are in the following chain. */
6063 struct bundle_state *allocated_states_chain;
6064 /* The DFA State after issuing the insn and the nops. */
6065 state_t dfa_state;
6068 /* The following array maps an insn number to the corresponding bundle state. */
6070 static struct bundle_state **index_to_bundle_states;
6072 /* The unique number of next bundle state. */
6074 static int bundle_states_num;
6076 /* All allocated bundle states are in the following chain. */
6078 static struct bundle_state *allocated_bundle_states_chain;
6080 /* All allocated but not used bundle states are in the following
6081 chain. */
6083 static struct bundle_state *free_bundle_state_chain;
6086 /* The following function returns a free bundle state. */
6088 static struct bundle_state *
6089 get_free_bundle_state (void)
6091 struct bundle_state *result;
6093 if (free_bundle_state_chain != NULL)
6095 result = free_bundle_state_chain;
6096 free_bundle_state_chain = result->next;
6098 else
6100 result = xmalloc (sizeof (struct bundle_state));
6101 result->dfa_state = xmalloc (dfa_state_size);
6102 result->allocated_states_chain = allocated_bundle_states_chain;
6103 allocated_bundle_states_chain = result;
6105 result->unique_num = bundle_states_num++;
6106 return result;
6110 /* The following function frees the given bundle state. */
6112 static void
6113 free_bundle_state (struct bundle_state *state)
6115 state->next = free_bundle_state_chain;
6116 free_bundle_state_chain = state;
6119 /* Start work with abstract data `bundle states'. */
6121 static void
6122 initiate_bundle_states (void)
6124 bundle_states_num = 0;
6125 free_bundle_state_chain = NULL;
6126 allocated_bundle_states_chain = NULL;
6129 /* Finish work with abstract data `bundle states'. */
6131 static void
6132 finish_bundle_states (void)
6134 struct bundle_state *curr_state, *next_state;
6136 for (curr_state = allocated_bundle_states_chain;
6137 curr_state != NULL;
6138 curr_state = next_state)
6140 next_state = curr_state->allocated_states_chain;
6141 free (curr_state->dfa_state);
6142 free (curr_state);
6146 /* Hash table of the bundle states. The key is dfa_state and insn_num
6147 of the bundle states. */
6149 static htab_t bundle_state_table;
6151 /* The function returns hash of BUNDLE_STATE. */
6153 static unsigned
6154 bundle_state_hash (const void *bundle_state)
6156 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6157 unsigned result, i;
6159 for (result = i = 0; i < dfa_state_size; i++)
6160 result += (((unsigned char *) state->dfa_state) [i]
6161 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6162 return result + state->insn_num;
6165 /* The function returns nonzero if the bundle state keys are equal. */
6167 static int
6168 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6170 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6171 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6173 return (state1->insn_num == state2->insn_num
6174 && memcmp (state1->dfa_state, state2->dfa_state,
6175 dfa_state_size) == 0);
6178 /* The function inserts BUNDLE_STATE into the hash table. The
6179 function returns nonzero if the bundle state has been inserted into
6180 the table. The table contains the best bundle state with a given key. */
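/* When a state with the same key already exists, the better of the two
   is kept: lower cost wins, then fewer accumulated insns (counting
   nops), then smaller branch deviation.  */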
6182 static int
6183 insert_bundle_state (struct bundle_state *bundle_state)
6185 void **entry_ptr;
6187 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6188 if (*entry_ptr == NULL)
6190 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6191 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6192 *entry_ptr = (void *) bundle_state;
6193 return TRUE;
6195 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6196 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6197 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6198 > bundle_state->accumulated_insns_num
6199 || (((struct bundle_state *)
6200 *entry_ptr)->accumulated_insns_num
6201 == bundle_state->accumulated_insns_num
6202 && ((struct bundle_state *)
6203 *entry_ptr)->branch_deviation
6204 > bundle_state->branch_deviation))))
6207 struct bundle_state temp;
6209 temp = *(struct bundle_state *) *entry_ptr;
6210 *(struct bundle_state *) *entry_ptr = *bundle_state;
6211 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6212 *bundle_state = temp;
6214 return FALSE;
6217 /* Start work with the hash table. */
6219 static void
6220 initiate_bundle_state_table (void)
6222 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6223 (htab_del) 0);
6226 /* Finish work with the hash table. */
6228 static void
6229 finish_bundle_state_table (void)
6231 htab_delete (bundle_state_table);
6236 /* The following variable is an insn `nop' used to check bundle states
6237 with different numbers of inserted nops. */
6239 static rtx ia64_nop;
6241 /* The following function tries to issue NOPS_NUM nops for the current
6242 state without advancing the processor cycle. If it fails, the
6243 function returns FALSE and frees the current state. */
6245 static int
6246 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6248 int i;
6250 for (i = 0; i < nops_num; i++)
6251 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6253 free_bundle_state (curr_state);
6254 return FALSE;
6256 return TRUE;
6259 /* The following function tries to issue INSN for the current
6260 state without advancing the processor cycle. If it fails, the
6261 function returns FALSE and frees the current state. */
6263 static int
6264 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6266 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6268 free_bundle_state (curr_state);
6269 return FALSE;
6271 return TRUE;
6274 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6275 starting from ORIGINATOR without advancing the processor cycle. If
6276 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6277 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6278 If successful, the function creates a new bundle state and
6279 inserts it into the hash table and into `index_to_bundle_states'. */
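/* Three cases are handled below: an insn group barrier (which must not
   start a new cycle), an insn that continues the current cycle
   (GET_MODE != TImode), and an insn that starts a new simulated cycle
   (TImode), for which the DFA state is advanced over the pre-cycle
   pseudo-insn and the state cost is incremented.  */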
6281 static void
6282 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6283 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6285 struct bundle_state *curr_state;
6287 curr_state = get_free_bundle_state ();
6288 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6289 curr_state->insn = insn;
6290 curr_state->insn_num = originator->insn_num + 1;
6291 curr_state->cost = originator->cost;
6292 curr_state->originator = originator;
6293 curr_state->before_nops_num = before_nops_num;
6294 curr_state->after_nops_num = 0;
6295 curr_state->accumulated_insns_num
6296 = originator->accumulated_insns_num + before_nops_num;
6297 curr_state->branch_deviation = originator->branch_deviation;
6298 if (insn == NULL_RTX)
6299 abort ();
6300 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6302 if (GET_MODE (insn) == TImode)
6303 abort ();
6304 if (!try_issue_nops (curr_state, before_nops_num))
6305 return;
6306 if (!try_issue_insn (curr_state, insn))
6307 return;
6308 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6309 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6310 && curr_state->accumulated_insns_num % 3 != 0)
6312 free_bundle_state (curr_state);
6313 return;
6316 else if (GET_MODE (insn) != TImode)
6318 if (!try_issue_nops (curr_state, before_nops_num))
6319 return;
6320 if (!try_issue_insn (curr_state, insn))
6321 return;
6322 curr_state->accumulated_insns_num++;
6323 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6324 || asm_noperands (PATTERN (insn)) >= 0)
6325 abort ();
6326 if (ia64_safe_type (insn) == TYPE_L)
6327 curr_state->accumulated_insns_num++;
6329 else
6331 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6332 state_transition (curr_state->dfa_state, NULL);
6333 curr_state->cost++;
6334 if (!try_issue_nops (curr_state, before_nops_num))
6335 return;
6336 if (!try_issue_insn (curr_state, insn))
6337 return;
6338 curr_state->accumulated_insns_num++;
6339 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6340 || asm_noperands (PATTERN (insn)) >= 0)
6342 /* Finish bundle containing asm insn. */
6343 curr_state->after_nops_num
6344 = 3 - curr_state->accumulated_insns_num % 3;
6345 curr_state->accumulated_insns_num
6346 += 3 - curr_state->accumulated_insns_num % 3;
6348 else if (ia64_safe_type (insn) == TYPE_L)
6349 curr_state->accumulated_insns_num++;
6351 if (ia64_safe_type (insn) == TYPE_B)
6352 curr_state->branch_deviation
6353 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6354 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6356 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6358 state_t dfa_state;
6359 struct bundle_state *curr_state1;
6360 struct bundle_state *allocated_states_chain;
6362 curr_state1 = get_free_bundle_state ();
6363 dfa_state = curr_state1->dfa_state;
6364 allocated_states_chain = curr_state1->allocated_states_chain;
6365 *curr_state1 = *curr_state;
6366 curr_state1->dfa_state = dfa_state;
6367 curr_state1->allocated_states_chain = allocated_states_chain;
6368 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6369 dfa_state_size);
6370 curr_state = curr_state1;
6372 if (!try_issue_nops (curr_state,
6373 3 - curr_state->accumulated_insns_num % 3))
6374 return;
6375 curr_state->after_nops_num
6376 = 3 - curr_state->accumulated_insns_num % 3;
6377 curr_state->accumulated_insns_num
6378 += 3 - curr_state->accumulated_insns_num % 3;
6380 if (!insert_bundle_state (curr_state))
6381 free_bundle_state (curr_state);
6382 return;
6385 /* The following function returns the position in the two-bundle
6386 window for the given STATE. */
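/* Roughly, the returned value is the number of slots already occupied
   in the current two-bundle issue window (0 if empty); `pos_1' through
   `pos_6' are the DFA unit codes queried for slots 1-6, set up in
   ia64_reorg with get_cpu_unit_code.  */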
6388 static int
6389 get_max_pos (state_t state)
6391 if (cpu_unit_reservation_p (state, pos_6))
6392 return 6;
6393 else if (cpu_unit_reservation_p (state, pos_5))
6394 return 5;
6395 else if (cpu_unit_reservation_p (state, pos_4))
6396 return 4;
6397 else if (cpu_unit_reservation_p (state, pos_3))
6398 return 3;
6399 else if (cpu_unit_reservation_p (state, pos_2))
6400 return 2;
6401 else if (cpu_unit_reservation_p (state, pos_1))
6402 return 1;
6403 else
6404 return 0;
6407 /* The function returns the code of a possible template for the given
6408 position and state. The function should be called only with
6409 position values of 3 or 6. */
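/* For reference, the template codes returned below appear to correspond
   to the bundle_selector operand as follows: 0 .mii, 1 .mmi, 2 .mfi,
   3 .mmf, 4 .bbb, 5 .mbb, 6 .mib, 7 .mmb, 8 .mfb, 9 .mlx (compare the
   `-> MFI' rewrite of template 9 to 2 in `bundling' below).  */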
6411 static int
6412 get_template (state_t state, int pos)
6414 switch (pos)
6416 case 3:
6417 if (cpu_unit_reservation_p (state, _0mii_))
6418 return 0;
6419 else if (cpu_unit_reservation_p (state, _0mmi_))
6420 return 1;
6421 else if (cpu_unit_reservation_p (state, _0mfi_))
6422 return 2;
6423 else if (cpu_unit_reservation_p (state, _0mmf_))
6424 return 3;
6425 else if (cpu_unit_reservation_p (state, _0bbb_))
6426 return 4;
6427 else if (cpu_unit_reservation_p (state, _0mbb_))
6428 return 5;
6429 else if (cpu_unit_reservation_p (state, _0mib_))
6430 return 6;
6431 else if (cpu_unit_reservation_p (state, _0mmb_))
6432 return 7;
6433 else if (cpu_unit_reservation_p (state, _0mfb_))
6434 return 8;
6435 else if (cpu_unit_reservation_p (state, _0mlx_))
6436 return 9;
6437 else
6438 abort ();
6439 case 6:
6440 if (cpu_unit_reservation_p (state, _1mii_))
6441 return 0;
6442 else if (cpu_unit_reservation_p (state, _1mmi_))
6443 return 1;
6444 else if (cpu_unit_reservation_p (state, _1mfi_))
6445 return 2;
6446 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6447 return 3;
6448 else if (cpu_unit_reservation_p (state, _1bbb_))
6449 return 4;
6450 else if (cpu_unit_reservation_p (state, _1mbb_))
6451 return 5;
6452 else if (cpu_unit_reservation_p (state, _1mib_))
6453 return 6;
6454 else if (cpu_unit_reservation_p (state, _1mmb_))
6455 return 7;
6456 else if (cpu_unit_reservation_p (state, _1mfb_))
6457 return 8;
6458 else if (cpu_unit_reservation_p (state, _1mlx_))
6459 return 9;
6460 else
6461 abort ();
6462 default:
6463 abort ();
6467 /* The following function returns the first insn important for insn
6468 bundling starting at INSN and before TAIL. */
6470 static rtx
6471 get_next_important_insn (rtx insn, rtx tail)
6473 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6474 if (INSN_P (insn)
6475 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6476 && GET_CODE (PATTERN (insn)) != USE
6477 && GET_CODE (PATTERN (insn)) != CLOBBER)
6478 return insn;
6479 return NULL_RTX;
6482 /* The following function does insn bundling. The bundling algorithm is
6483 based on dynamic programming. It tries to insert different numbers of
6484 nop insns before/after the real insns. At the end of the EBB, it chooses
6485 the best alternative and then, moving back through the EBB, inserts
6486 templates for that alternative. The algorithm is directed by information
6487 (changes of the simulated processor cycle) created by the 2nd insn
6488 scheduling pass. */
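/* Sketch of the dynamic programming below: index_to_bundle_states[i]
   chains the states reachable after issuing the first i important insns;
   for each such state, issue_nops_and_insn tries the next insn with 0, 1
   or 2 preceding nops.  The cheapest final state whose insn count is a
   multiple of 3 is then walked back through the `originator' links to
   emit the nops and bundle_selector insns of the chosen schedule.  */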
6490 static void
6491 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6493 struct bundle_state *curr_state, *next_state, *best_state;
6494 rtx insn, next_insn;
6495 int insn_num;
6496 int i, bundle_end_p, only_bundle_end_p, asm_p;
6497 int pos = 0, max_pos, template0, template1;
6498 rtx b;
6499 rtx nop;
6500 enum attr_type type;
6502 insn_num = 0;
6503 for (insn = NEXT_INSN (prev_head_insn);
6504 insn && insn != tail;
6505 insn = NEXT_INSN (insn))
6506 if (INSN_P (insn))
6507 insn_num++;
6508 if (insn_num == 0)
6509 return;
6510 bundling_p = 1;
6511 dfa_clean_insn_cache ();
6512 initiate_bundle_state_table ();
6513 index_to_bundle_states = xmalloc ((insn_num + 2)
6514 * sizeof (struct bundle_state *));
6515 /* First (forward) pass -- generates states. */
6516 curr_state = get_free_bundle_state ();
6517 curr_state->insn = NULL;
6518 curr_state->before_nops_num = 0;
6519 curr_state->after_nops_num = 0;
6520 curr_state->insn_num = 0;
6521 curr_state->cost = 0;
6522 curr_state->accumulated_insns_num = 0;
6523 curr_state->branch_deviation = 0;
6524 curr_state->next = NULL;
6525 curr_state->originator = NULL;
6526 state_reset (curr_state->dfa_state);
6527 index_to_bundle_states [0] = curr_state;
6528 insn_num = 0;
6529 for (insn = NEXT_INSN (prev_head_insn);
6530 insn != tail;
6531 insn = NEXT_INSN (insn))
6532 if (INSN_P (insn)
6533 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6534 || GET_CODE (PATTERN (insn)) == USE
6535 || GET_CODE (PATTERN (insn)) == CLOBBER)
6536 && GET_MODE (insn) == TImode)
6538 PUT_MODE (insn, VOIDmode);
6539 for (next_insn = NEXT_INSN (insn);
6540 next_insn != tail;
6541 next_insn = NEXT_INSN (next_insn))
6542 if (INSN_P (next_insn)
6543 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6544 && GET_CODE (PATTERN (next_insn)) != USE
6545 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6547 PUT_MODE (next_insn, TImode);
6548 break;
6551 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6552 insn != NULL_RTX;
6553 insn = next_insn)
6555 if (!INSN_P (insn)
6556 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6557 || GET_CODE (PATTERN (insn)) == USE
6558 || GET_CODE (PATTERN (insn)) == CLOBBER)
6559 abort ();
6560 type = ia64_safe_type (insn);
6561 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6562 insn_num++;
6563 index_to_bundle_states [insn_num] = NULL;
6564 for (curr_state = index_to_bundle_states [insn_num - 1];
6565 curr_state != NULL;
6566 curr_state = next_state)
6568 pos = curr_state->accumulated_insns_num % 3;
6569 next_state = curr_state->next;
6570 /* Finish the current bundle in order to start a subsequent
6571 asm insn in a new bundle. */
6572 only_bundle_end_p
6573 = (next_insn != NULL_RTX
6574 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6575 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6576 bundle_end_p
6577 = (only_bundle_end_p || next_insn == NULL_RTX
6578 || (GET_MODE (next_insn) == TImode
6579 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6580 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6581 || type == TYPE_S
6582 /* We need to insert 2 Nops for cases like M_MII. */
6583 || (type == TYPE_M && ia64_tune == PROCESSOR_ITANIUM
6584 && !bundle_end_p && pos == 1))
6585 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6586 only_bundle_end_p);
6587 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6588 only_bundle_end_p);
6589 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6590 only_bundle_end_p);
6592 if (index_to_bundle_states [insn_num] == NULL)
6593 abort ();
6594 for (curr_state = index_to_bundle_states [insn_num];
6595 curr_state != NULL;
6596 curr_state = curr_state->next)
6597 if (verbose >= 2 && dump)
6599 struct DFA_chip
6601 unsigned short one_automaton_state;
6602 unsigned short oneb_automaton_state;
6603 unsigned short two_automaton_state;
6604 unsigned short twob_automaton_state;
6607 fprintf
6608 (dump,
6609 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6610 curr_state->unique_num,
6611 (curr_state->originator == NULL
6612 ? -1 : curr_state->originator->unique_num),
6613 curr_state->cost,
6614 curr_state->before_nops_num, curr_state->after_nops_num,
6615 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6616 (ia64_tune == PROCESSOR_ITANIUM
6617 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6618 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6619 INSN_UID (insn));
6622 if (index_to_bundle_states [insn_num] == NULL)
6623 abort ();
6624 /* Finding state with a minimal cost: */
6625 best_state = NULL;
6626 for (curr_state = index_to_bundle_states [insn_num];
6627 curr_state != NULL;
6628 curr_state = curr_state->next)
6629 if (curr_state->accumulated_insns_num % 3 == 0
6630 && (best_state == NULL || best_state->cost > curr_state->cost
6631 || (best_state->cost == curr_state->cost
6632 && (curr_state->accumulated_insns_num
6633 < best_state->accumulated_insns_num
6634 || (curr_state->accumulated_insns_num
6635 == best_state->accumulated_insns_num
6636 && curr_state->branch_deviation
6637 < best_state->branch_deviation)))))
6638 best_state = curr_state;
6639 /* Second (backward) pass: adding nops and templates: */
6640 insn_num = best_state->before_nops_num;
6641 template0 = template1 = -1;
6642 for (curr_state = best_state;
6643 curr_state->originator != NULL;
6644 curr_state = curr_state->originator)
6646 insn = curr_state->insn;
6647 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6648 || asm_noperands (PATTERN (insn)) >= 0);
6649 insn_num++;
6650 if (verbose >= 2 && dump)
6652 struct DFA_chip
6654 unsigned short one_automaton_state;
6655 unsigned short oneb_automaton_state;
6656 unsigned short two_automaton_state;
6657 unsigned short twob_automaton_state;
6660 fprintf
6661 (dump,
6662 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6663 curr_state->unique_num,
6664 (curr_state->originator == NULL
6665 ? -1 : curr_state->originator->unique_num),
6666 curr_state->cost,
6667 curr_state->before_nops_num, curr_state->after_nops_num,
6668 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6669 (ia64_tune == PROCESSOR_ITANIUM
6670 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6671 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6672 INSN_UID (insn));
6674 max_pos = get_max_pos (curr_state->dfa_state);
6675 if (max_pos == 6 || (max_pos == 3 && template0 < 0))
6677 pos = max_pos;
6678 if (max_pos == 3)
6679 template0 = get_template (curr_state->dfa_state, 3);
6680 else
6682 template1 = get_template (curr_state->dfa_state, 3);
6683 template0 = get_template (curr_state->dfa_state, 6);
6686 if (max_pos > 3 && template1 < 0)
6688 if (pos > 3)
6689 abort ();
6690 template1 = get_template (curr_state->dfa_state, 3);
6691 pos += 3;
6693 if (!asm_p)
6694 for (i = 0; i < curr_state->after_nops_num; i++)
6696 nop = gen_nop ();
6697 emit_insn_after (nop, insn);
6698 pos--;
6699 if (pos < 0)
6700 abort ();
6701 if (pos % 3 == 0)
6703 if (template0 < 0)
6704 abort ();
6705 b = gen_bundle_selector (GEN_INT (template0));
6706 ia64_emit_insn_before (b, nop);
6707 template0 = template1;
6708 template1 = -1;
6711 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6712 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6713 && asm_noperands (PATTERN (insn)) < 0)
6714 pos--;
6715 if (ia64_safe_type (insn) == TYPE_L)
6716 pos--;
6717 if (pos < 0)
6718 abort ();
6719 if (pos % 3 == 0
6720 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6721 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6722 && asm_noperands (PATTERN (insn)) < 0)
6724 if (template0 < 0)
6725 abort ();
6726 b = gen_bundle_selector (GEN_INT (template0));
6727 ia64_emit_insn_before (b, insn);
6728 b = PREV_INSN (insn);
6729 insn = b;
6730 template0 = template1;
6731 template1 = -1;
6733 for (i = 0; i < curr_state->before_nops_num; i++)
6735 nop = gen_nop ();
6736 ia64_emit_insn_before (nop, insn);
6737 nop = PREV_INSN (insn);
6738 insn = nop;
6739 pos--;
6740 if (pos < 0)
6741 abort ();
6742 if (pos % 3 == 0)
6744 if (template0 < 0)
6745 abort ();
6746 b = gen_bundle_selector (GEN_INT (template0));
6747 ia64_emit_insn_before (b, insn);
6748 b = PREV_INSN (insn);
6749 insn = b;
6750 template0 = template1;
6751 template1 = -1;
6755 if (ia64_tune == PROCESSOR_ITANIUM)
6756 /* Insert additional cycles for MM-insns: */
6757 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6758 insn != NULL_RTX;
6759 insn = next_insn)
6761 if (!INSN_P (insn)
6762 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6763 || GET_CODE (PATTERN (insn)) == USE
6764 || GET_CODE (PATTERN (insn)) == CLOBBER)
6765 abort ();
6766 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6767 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
6769 rtx last;
6770 int i, j, n;
6771 int pred_stop_p;
6773 last = prev_active_insn (insn);
6774 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6775 if (pred_stop_p)
6776 last = prev_active_insn (last);
6777 n = 0;
6778 for (;; last = prev_active_insn (last))
6779 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6781 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
6782 if (template0 == 9)
6783 PATTERN (last)
6784 = gen_bundle_selector (GEN_INT (2)); /* -> MFI */
6785 break;
6787 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6788 n++;
6789 if ((pred_stop_p && n == 0) || n > 2
6790 || (template0 == 9 && n != 0))
6791 abort ();
6792 for (j = 3 - n; j > 0; j --)
6793 ia64_emit_insn_before (gen_nop (), insn);
6794 add_cycles [INSN_UID (insn)]--;
6795 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6796 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6797 insn);
6798 if (pred_stop_p)
6799 add_cycles [INSN_UID (insn)]--;
6800 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
6802 /* Insert .MII bundle. */
6803 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (0)),
6804 insn);
6805 ia64_emit_insn_before (gen_nop (), insn);
6806 ia64_emit_insn_before (gen_nop (), insn);
6807 if (i > 1)
6809 ia64_emit_insn_before
6810 (gen_insn_group_barrier (GEN_INT (3)), insn);
6811 i--;
6813 ia64_emit_insn_before (gen_nop (), insn);
6814 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6815 insn);
6817 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
6818 insn);
6819 for (j = n; j > 0; j --)
6820 ia64_emit_insn_before (gen_nop (), insn);
6821 if (pred_stop_p)
6822 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6823 insn);
6826 free (index_to_bundle_states);
6827 finish_bundle_state_table ();
6828 bundling_p = 0;
6829 dfa_clean_insn_cache ();
6832 /* The following function is called at the end of scheduling a BB or
6833 EBB. After reload, it inserts stop bits and does insn bundling. */
6835 static void
6836 ia64_sched_finish (FILE *dump, int sched_verbose)
6838 if (sched_verbose)
6839 fprintf (dump, "// Finishing schedule.\n");
6840 if (!reload_completed)
6841 return;
6842 if (reload_completed)
6844 final_emit_insn_group_barriers (dump);
6845 bundling (dump, sched_verbose, current_sched_info->prev_head,
6846 current_sched_info->next_tail);
6847 if (sched_verbose && dump)
6848 fprintf (dump, "// finishing %d-%d\n",
6849 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
6850 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
6852 return;
6856 /* The following function inserts stop bits in scheduled BB or EBB. */
6858 static void
6859 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6861 rtx insn;
6862 int need_barrier_p = 0;
6863 rtx prev_insn = NULL_RTX;
6865 init_insn_group_barriers ();
6867 for (insn = NEXT_INSN (current_sched_info->prev_head);
6868 insn != current_sched_info->next_tail;
6869 insn = NEXT_INSN (insn))
6871 if (GET_CODE (insn) == BARRIER)
6873 rtx last = prev_active_insn (insn);
6875 if (! last)
6876 continue;
6877 if (GET_CODE (last) == JUMP_INSN
6878 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6879 last = prev_active_insn (last);
6880 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6881 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6883 init_insn_group_barriers ();
6884 need_barrier_p = 0;
6885 prev_insn = NULL_RTX;
6887 else if (INSN_P (insn))
6889 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6891 init_insn_group_barriers ();
6892 need_barrier_p = 0;
6893 prev_insn = NULL_RTX;
6895 else if (need_barrier_p || group_barrier_needed_p (insn))
6897 if (TARGET_EARLY_STOP_BITS)
6899 rtx last;
6901 for (last = insn;
6902 last != current_sched_info->prev_head;
6903 last = PREV_INSN (last))
6904 if (INSN_P (last) && GET_MODE (last) == TImode
6905 && stops_p [INSN_UID (last)])
6906 break;
6907 if (last == current_sched_info->prev_head)
6908 last = insn;
6909 last = prev_active_insn (last);
6910 if (last
6911 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
6912 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
6913 last);
6914 init_insn_group_barriers ();
6915 for (last = NEXT_INSN (last);
6916 last != insn;
6917 last = NEXT_INSN (last))
6918 if (INSN_P (last))
6919 group_barrier_needed_p (last);
6921 else
6923 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6924 insn);
6925 init_insn_group_barriers ();
6927 group_barrier_needed_p (insn);
6928 prev_insn = NULL_RTX;
6930 else if (recog_memoized (insn) >= 0)
6931 prev_insn = insn;
6932 need_barrier_p = (GET_CODE (insn) == CALL_INSN
6933 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6934 || asm_noperands (PATTERN (insn)) >= 0);
6941 /* If the following function returns TRUE, we will use the DFA
6942 insn scheduler. */
6944 static int
6945 ia64_use_dfa_pipeline_interface (void)
6947 return 1;
6950 /* The following function returns the lookahead depth used by the
6951 first-cycle multipass DFA insn scheduler. */
6953 static int
6954 ia64_first_cycle_multipass_dfa_lookahead (void)
6956 return (reload_completed ? 6 : 4);
6959 /* The following function initializes the variables `dfa_pre_cycle_insn'
     and `dfa_stop_insn' used by the DFA insn scheduler. */
6961 static void
6962 ia64_init_dfa_pre_cycle_insn (void)
6964 if (temp_dfa_state == NULL)
6966 dfa_state_size = state_size ();
6967 temp_dfa_state = xmalloc (dfa_state_size);
6968 prev_cycle_state = xmalloc (dfa_state_size);
6970 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
6971 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
6972 recog_memoized (dfa_pre_cycle_insn);
6973 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
6974 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
6975 recog_memoized (dfa_stop_insn);
6978 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
6979 used by the DFA insn scheduler. */
6981 static rtx
6982 ia64_dfa_pre_cycle_insn (void)
6984 return dfa_pre_cycle_insn;
6987 /* The following function returns TRUE if PRODUCER (of type ilog or
6988 ld) produces an address for CONSUMER (of type st or stf). */
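/* For example (hypothetical insns), if PRODUCER is `r14 = r32 + 8' and
   CONSUMER is `st8 [r14] = r33', the destination register of PRODUCER is
   mentioned in the address of CONSUMER's memory destination, so the
   function returns TRUE.  */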
6991 ia64_st_address_bypass_p (rtx producer, rtx consumer)
6993 rtx dest, reg, mem;
6995 if (producer == NULL_RTX || consumer == NULL_RTX)
6996 abort ();
6997 dest = ia64_single_set (producer);
6998 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6999 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7000 abort ();
7001 if (GET_CODE (reg) == SUBREG)
7002 reg = SUBREG_REG (reg);
7003 dest = ia64_single_set (consumer);
7004 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
7005 || GET_CODE (mem) != MEM)
7006 abort ();
7007 return reg_mentioned_p (reg, mem);
7010 /* The following function returns TRUE if PRODUCER (of type ilog or
7011 ld) produces an address for CONSUMER (of type ld or fld). */
7014 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7016 rtx dest, src, reg, mem;
7018 if (producer == NULL_RTX || consumer == NULL_RTX)
7019 abort ();
7020 dest = ia64_single_set (producer);
7021 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7022 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7023 abort ();
7024 if (GET_CODE (reg) == SUBREG)
7025 reg = SUBREG_REG (reg);
7026 src = ia64_single_set (consumer);
7027 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
7028 abort ();
7029 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7030 mem = XVECEXP (mem, 0, 0);
7031 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7032 mem = XEXP (mem, 0);
7034 /* Note that LO_SUM is used for GOT loads. */
7035 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
7036 abort ();
7038 return reg_mentioned_p (reg, mem);
7041 /* The following function returns TRUE if INSN produces an address for a
7042 load/store insn. We will place such insns into an M slot because it
7043 decreases their latency. */
7046 ia64_produce_address_p (rtx insn)
7048 return insn->call;
7052 /* Emit pseudo-ops for the assembler to describe predicate relations.
7053 At present this assumes that we only consider predicate pairs to
7054 be mutex, and that the assembler can deduce proper values from
7055 straight-line code. */
7057 static void
7058 emit_predicate_relation_info (void)
7060 basic_block bb;
7062 FOR_EACH_BB_REVERSE (bb)
7064 int r;
7065 rtx head = bb->head;
7067 /* We only need such notes at code labels. */
7068 if (GET_CODE (head) != CODE_LABEL)
7069 continue;
7070 if (GET_CODE (NEXT_INSN (head)) == NOTE
7071 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7072 head = NEXT_INSN (head);
7074 for (r = PR_REG (0); r < PR_REG (64); r += 2)
7075 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
7077 rtx p = gen_rtx_REG (BImode, r);
7078 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7079 if (head == bb->end)
7080 bb->end = n;
7081 head = n;
7085 /* Look for conditional calls that do not return, and protect predicate
7086 relations around them. Otherwise the assembler will assume the call
7087 returns, and complain about uses of call-clobbered predicates after
7088 the call. */
7089 FOR_EACH_BB_REVERSE (bb)
7091 rtx insn = bb->head;
7093 while (1)
7095 if (GET_CODE (insn) == CALL_INSN
7096 && GET_CODE (PATTERN (insn)) == COND_EXEC
7097 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7099 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7100 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7101 if (bb->head == insn)
7102 bb->head = b;
7103 if (bb->end == insn)
7104 bb->end = a;
7107 if (insn == bb->end)
7108 break;
7109 insn = NEXT_INSN (insn);
7114 /* Perform machine dependent operations on the rtl chain INSNS. */
7116 static void
7117 ia64_reorg (void)
7119 /* We are freeing block_for_insn in the toplev to keep compatibility
7120 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7121 compute_bb_for_insn ();
7123 /* If optimizing, we'll have split before scheduling. */
7124 if (optimize == 0)
7125 split_all_insns (0);
7127 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7128 non-optimizing bootstrap. */
7129 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7131 if (ia64_flag_schedule_insns2)
7133 timevar_push (TV_SCHED2);
7134 ia64_final_schedule = 1;
7136 initiate_bundle_states ();
7137 ia64_nop = make_insn_raw (gen_nop ());
7138 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7139 recog_memoized (ia64_nop);
7140 clocks_length = get_max_uid () + 1;
7141 stops_p = xcalloc (1, clocks_length);
7142 if (ia64_tune == PROCESSOR_ITANIUM)
7144 clocks = xcalloc (clocks_length, sizeof (int));
7145 add_cycles = xcalloc (clocks_length, sizeof (int));
7147 if (ia64_tune == PROCESSOR_ITANIUM2)
7149 pos_1 = get_cpu_unit_code ("2_1");
7150 pos_2 = get_cpu_unit_code ("2_2");
7151 pos_3 = get_cpu_unit_code ("2_3");
7152 pos_4 = get_cpu_unit_code ("2_4");
7153 pos_5 = get_cpu_unit_code ("2_5");
7154 pos_6 = get_cpu_unit_code ("2_6");
7155 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7156 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7157 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7158 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7159 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7160 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7161 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7162 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7163 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7164 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7165 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7166 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7167 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7168 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7169 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7170 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7171 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7172 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7173 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7174 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7176 else
7178 pos_1 = get_cpu_unit_code ("1_1");
7179 pos_2 = get_cpu_unit_code ("1_2");
7180 pos_3 = get_cpu_unit_code ("1_3");
7181 pos_4 = get_cpu_unit_code ("1_4");
7182 pos_5 = get_cpu_unit_code ("1_5");
7183 pos_6 = get_cpu_unit_code ("1_6");
7184 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7185 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7186 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7187 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7188 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7189 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7190 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7191 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7192 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7193 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7194 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7195 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7196 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7197 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7198 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7199 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7200 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7201 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7202 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7203 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7205 schedule_ebbs (rtl_dump_file);
7206 finish_bundle_states ();
7207 if (ia64_tune == PROCESSOR_ITANIUM)
7209 free (add_cycles);
7210 free (clocks);
7212 free (stops_p);
7213 emit_insn_group_barriers (rtl_dump_file);
7215 ia64_final_schedule = 0;
7216 timevar_pop (TV_SCHED2);
7218 else
7219 emit_all_insn_group_barriers (rtl_dump_file);
7221 /* A call must not be the last instruction in a function, so that the
7222 return address remains within the function and unwinding works
7223 properly. Note that IA-64 differs from dwarf2 on this point. */
7224 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7226 rtx insn;
7227 int saw_stop = 0;
7229 insn = get_last_insn ();
7230 if (! INSN_P (insn))
7231 insn = prev_active_insn (insn);
7232 if (GET_CODE (insn) == INSN
7233 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7234 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7236 saw_stop = 1;
7237 insn = prev_active_insn (insn);
7239 if (GET_CODE (insn) == CALL_INSN)
7241 if (! saw_stop)
7242 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7243 emit_insn (gen_break_f ());
7244 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7248 fixup_errata ();
7249 emit_predicate_relation_info ();
7252 /* Return true if REGNO is used by the epilogue. */
7255 ia64_epilogue_uses (int regno)
7257 switch (regno)
7259 case R_GR (1):
7260 /* With a call to a function in another module, we will write a new
7261 value to "gp". After returning from such a call, we need to make
7262 sure the function restores the original gp-value, even if the
7263 function itself does not use the gp anymore. */
7264 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7266 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7267 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7268 /* For functions defined with the syscall_linkage attribute, all
7269 input registers are marked as live at all function exits. This
7270 prevents the register allocator from using the input registers,
7271 which in turn makes it possible to restart a system call after
7272 an interrupt without having to save/restore the input registers.
7273 This also prevents kernel data from leaking to application code. */
7274 return lookup_attribute ("syscall_linkage",
7275 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7277 case R_BR (0):
7278 /* Conditional return patterns can't represent the use of `b0' as
7279 the return address, so we force the value live this way. */
7280 return 1;
7282 case AR_PFS_REGNUM:
7283 /* Likewise for ar.pfs, which is used by br.ret. */
7284 return 1;
7286 default:
7287 return 0;
7291 /* Return true if REGNO is used by the frame unwinder. */
7294 ia64_eh_uses (int regno)
7296 if (! reload_completed)
7297 return 0;
7299 if (current_frame_info.reg_save_b0
7300 && regno == current_frame_info.reg_save_b0)
7301 return 1;
7302 if (current_frame_info.reg_save_pr
7303 && regno == current_frame_info.reg_save_pr)
7304 return 1;
7305 if (current_frame_info.reg_save_ar_pfs
7306 && regno == current_frame_info.reg_save_ar_pfs)
7307 return 1;
7308 if (current_frame_info.reg_save_ar_unat
7309 && regno == current_frame_info.reg_save_ar_unat)
7310 return 1;
7311 if (current_frame_info.reg_save_ar_lc
7312 && regno == current_frame_info.reg_save_ar_lc)
7313 return 1;
7315 return 0;
7318 /* Return true if this goes in small data/bss. */
7320 /* ??? We could also support our own long data here, generating movl/add/ld8
7321 instead of addl,ld8/ld8. This makes the code bigger, but should make the
7322 code faster because there is one less load. This also includes incomplete
7323 types which can't go in sdata/sbss. */
7325 static bool
7326 ia64_in_small_data_p (tree exp)
7328 if (TARGET_NO_SDATA)
7329 return false;
7331 /* We want to merge strings, so we never consider them small data. */
7332 if (TREE_CODE (exp) == STRING_CST)
7333 return false;
7335 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7337 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7338 if (strcmp (section, ".sdata") == 0
7339 || strcmp (section, ".sbss") == 0)
7340 return true;
7342 else
7344 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7346 /* If this is an incomplete type with size 0, then we can't put it
7347 in sdata because it might be too big when completed. */
7348 if (size > 0 && size <= ia64_section_threshold)
7349 return true;
7352 return false;
7355 /* Output assembly directives for prologue regions. */
7357 /* True if the current basic block is the last block of the function. */
7359 static bool last_block;
7361 /* True if we need a copy_state command at the start of the next block. */
7363 static bool need_copy_state;
7365 /* The function emits unwind directives for the start of an epilogue. */
7367 static void
7368 process_epilogue (void)
7370 /* If this isn't the last block of the function, then we need to label the
7371 current state, and copy it back in at the start of the next block. */
7373 if (!last_block)
7375 fprintf (asm_out_file, "\t.label_state 1\n");
7376 need_copy_state = true;
7379 fprintf (asm_out_file, "\t.restore sp\n");
7382 /* This function processes a SET pattern looking for specific patterns
7383 which result in emitting an assembly directive required for unwinding. */
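/* For example, a frame-related `sp = sp - 96' in the prologue results in
   a `.fframe 96' directive, copying the stack pointer into the hard
   frame pointer results in `.vframe', and a copy of b0 into its save
   register results in `.save rp, r<n>' (register numbers here are only
   illustrative).  */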
7385 static int
7386 process_set (FILE *asm_out_file, rtx pat)
7388 rtx src = SET_SRC (pat);
7389 rtx dest = SET_DEST (pat);
7390 int src_regno, dest_regno;
7392 /* Look for the ALLOC insn. */
7393 if (GET_CODE (src) == UNSPEC_VOLATILE
7394 && XINT (src, 1) == UNSPECV_ALLOC
7395 && GET_CODE (dest) == REG)
7397 dest_regno = REGNO (dest);
7399 /* If this isn't the final destination for ar.pfs, the alloc
7400 shouldn't have been marked frame related. */
7401 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7402 abort ();
7404 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7405 ia64_dbx_register_number (dest_regno));
7406 return 1;
7409 /* Look for SP = .... */
7410 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7412 if (GET_CODE (src) == PLUS)
7414 rtx op0 = XEXP (src, 0);
7415 rtx op1 = XEXP (src, 1);
7416 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7418 if (INTVAL (op1) < 0)
7419 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7420 -INTVAL (op1));
7421 else
7422 process_epilogue ();
7424 else
7425 abort ();
7427 else if (GET_CODE (src) == REG
7428 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7429 process_epilogue ();
7430 else
7431 abort ();
7433 return 1;
7436 /* Register move we need to look at. */
7437 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7439 src_regno = REGNO (src);
7440 dest_regno = REGNO (dest);
7442 switch (src_regno)
7444 case BR_REG (0):
7445 /* Saving return address pointer. */
7446 if (dest_regno != current_frame_info.reg_save_b0)
7447 abort ();
7448 fprintf (asm_out_file, "\t.save rp, r%d\n",
7449 ia64_dbx_register_number (dest_regno));
7450 return 1;
7452 case PR_REG (0):
7453 if (dest_regno != current_frame_info.reg_save_pr)
7454 abort ();
7455 fprintf (asm_out_file, "\t.save pr, r%d\n",
7456 ia64_dbx_register_number (dest_regno));
7457 return 1;
7459 case AR_UNAT_REGNUM:
7460 if (dest_regno != current_frame_info.reg_save_ar_unat)
7461 abort ();
7462 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7463 ia64_dbx_register_number (dest_regno));
7464 return 1;
7466 case AR_LC_REGNUM:
7467 if (dest_regno != current_frame_info.reg_save_ar_lc)
7468 abort ();
7469 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7470 ia64_dbx_register_number (dest_regno));
7471 return 1;
7473 case STACK_POINTER_REGNUM:
7474 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7475 || ! frame_pointer_needed)
7476 abort ();
7477 fprintf (asm_out_file, "\t.vframe r%d\n",
7478 ia64_dbx_register_number (dest_regno));
7479 return 1;
7481 default:
7482 /* Everything else should indicate being stored to memory. */
7483 abort ();
7487 /* Memory store we need to look at. */
7488 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7490 long off;
7491 rtx base;
7492 const char *saveop;
7494 if (GET_CODE (XEXP (dest, 0)) == REG)
7496 base = XEXP (dest, 0);
7497 off = 0;
7499 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7500 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7502 base = XEXP (XEXP (dest, 0), 0);
7503 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7505 else
7506 abort ();
7508 if (base == hard_frame_pointer_rtx)
7510 saveop = ".savepsp";
7511 off = - off;
7513 else if (base == stack_pointer_rtx)
7514 saveop = ".savesp";
7515 else
7516 abort ();
7518 src_regno = REGNO (src);
7519 switch (src_regno)
7521 case BR_REG (0):
7522 if (current_frame_info.reg_save_b0 != 0)
7523 abort ();
7524 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7525 return 1;
7527 case PR_REG (0):
7528 if (current_frame_info.reg_save_pr != 0)
7529 abort ();
7530 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7531 return 1;
7533 case AR_LC_REGNUM:
7534 if (current_frame_info.reg_save_ar_lc != 0)
7535 abort ();
7536 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7537 return 1;
7539 case AR_PFS_REGNUM:
7540 if (current_frame_info.reg_save_ar_pfs != 0)
7541 abort ();
7542 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7543 return 1;
7545 case AR_UNAT_REGNUM:
7546 if (current_frame_info.reg_save_ar_unat != 0)
7547 abort ();
7548 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7549 return 1;
7551 case GR_REG (4):
7552 case GR_REG (5):
7553 case GR_REG (6):
7554 case GR_REG (7):
7555 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7556 1 << (src_regno - GR_REG (4)));
7557 return 1;
7559 case BR_REG (1):
7560 case BR_REG (2):
7561 case BR_REG (3):
7562 case BR_REG (4):
7563 case BR_REG (5):
7564 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7565 1 << (src_regno - BR_REG (1)));
7566 return 1;
7568 case FR_REG (2):
7569 case FR_REG (3):
7570 case FR_REG (4):
7571 case FR_REG (5):
7572 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7573 1 << (src_regno - FR_REG (2)));
7574 return 1;
7576 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7577 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7578 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7579 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7580 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7581 1 << (src_regno - FR_REG (12)));
7582 return 1;
7584 default:
7585 return 0;
7589 return 0;
7593 /* This function looks at a single insn and emits any directives
7594 required to unwind this insn. */
7595 void
7596 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7598 if (flag_unwind_tables
7599 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7601 rtx pat;
7603 if (GET_CODE (insn) == NOTE
7604 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7606 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7608 /* Restore unwind state from immediately before the epilogue. */
7609 if (need_copy_state)
7611 fprintf (asm_out_file, "\t.body\n");
7612 fprintf (asm_out_file, "\t.copy_state 1\n");
7613 need_copy_state = false;
7617 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7618 return;
7620 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7621 if (pat)
7622 pat = XEXP (pat, 0);
7623 else
7624 pat = PATTERN (insn);
7626 switch (GET_CODE (pat))
7628 case SET:
7629 process_set (asm_out_file, pat);
7630 break;
7632 case PARALLEL:
7634 int par_index;
7635 int limit = XVECLEN (pat, 0);
7636 for (par_index = 0; par_index < limit; par_index++)
7638 rtx x = XVECEXP (pat, 0, par_index);
7639 if (GET_CODE (x) == SET)
7640 process_set (asm_out_file, x);
7642 break;
7645 default:
7646 abort ();
7652 void
7653 ia64_init_builtins (void)
7655 tree psi_type_node = build_pointer_type (integer_type_node);
7656 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7658 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7659 tree si_ftype_psi_si_si
7660 = build_function_type_list (integer_type_node,
7661 psi_type_node, integer_type_node,
7662 integer_type_node, NULL_TREE);
7664 /* __sync_val_compare_and_swap_di */
7665 tree di_ftype_pdi_di_di
7666 = build_function_type_list (long_integer_type_node,
7667 pdi_type_node, long_integer_type_node,
7668 long_integer_type_node, NULL_TREE);
7669 /* __sync_bool_compare_and_swap_di */
7670 tree si_ftype_pdi_di_di
7671 = build_function_type_list (integer_type_node,
7672 pdi_type_node, long_integer_type_node,
7673 long_integer_type_node, NULL_TREE);
7674 /* __sync_synchronize */
7675 tree void_ftype_void
7676 = build_function_type (void_type_node, void_list_node);
7678 /* __sync_lock_test_and_set_si */
7679 tree si_ftype_psi_si
7680 = build_function_type_list (integer_type_node,
7681 psi_type_node, integer_type_node, NULL_TREE);
7683 /* __sync_lock_test_and_set_di */
7684 tree di_ftype_pdi_di
7685 = build_function_type_list (long_integer_type_node,
7686 pdi_type_node, long_integer_type_node,
7687 NULL_TREE);
7689 /* __sync_lock_release_si */
7690 tree void_ftype_psi
7691 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7693 /* __sync_lock_release_di */
7694 tree void_ftype_pdi
7695 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
7697 #define def_builtin(name, type, code) \
7698 builtin_function ((name), (type), (code), BUILT_IN_MD, NULL, NULL_TREE)
7700 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7701 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7702 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7703 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7704 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7705 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7706 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7707 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7709 def_builtin ("__sync_synchronize", void_ftype_void,
7710 IA64_BUILTIN_SYNCHRONIZE);
7712 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7713 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7714 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7715 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7716 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7717 IA64_BUILTIN_LOCK_RELEASE_SI);
7718 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7719 IA64_BUILTIN_LOCK_RELEASE_DI);
7721 def_builtin ("__builtin_ia64_bsp",
7722 build_function_type (ptr_type_node, void_list_node),
7723 IA64_BUILTIN_BSP);
7725 def_builtin ("__builtin_ia64_flushrs",
7726 build_function_type (void_type_node, void_list_node),
7727 IA64_BUILTIN_FLUSHRS);
7729 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7730 IA64_BUILTIN_FETCH_AND_ADD_SI);
7731 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7732 IA64_BUILTIN_FETCH_AND_SUB_SI);
7733 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7734 IA64_BUILTIN_FETCH_AND_OR_SI);
7735 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7736 IA64_BUILTIN_FETCH_AND_AND_SI);
7737 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7738 IA64_BUILTIN_FETCH_AND_XOR_SI);
7739 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
7740 IA64_BUILTIN_FETCH_AND_NAND_SI);
7742 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
7743 IA64_BUILTIN_ADD_AND_FETCH_SI);
7744 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
7745 IA64_BUILTIN_SUB_AND_FETCH_SI);
7746 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
7747 IA64_BUILTIN_OR_AND_FETCH_SI);
7748 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
7749 IA64_BUILTIN_AND_AND_FETCH_SI);
7750 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
7751 IA64_BUILTIN_XOR_AND_FETCH_SI);
7752 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
7753 IA64_BUILTIN_NAND_AND_FETCH_SI);
7755 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
7756 IA64_BUILTIN_FETCH_AND_ADD_DI);
7757 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
7758 IA64_BUILTIN_FETCH_AND_SUB_DI);
7759 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
7760 IA64_BUILTIN_FETCH_AND_OR_DI);
7761 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
7762 IA64_BUILTIN_FETCH_AND_AND_DI);
7763 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
7764 IA64_BUILTIN_FETCH_AND_XOR_DI);
7765 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
7766 IA64_BUILTIN_FETCH_AND_NAND_DI);
7768 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
7769 IA64_BUILTIN_ADD_AND_FETCH_DI);
7770 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
7771 IA64_BUILTIN_SUB_AND_FETCH_DI);
7772 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
7773 IA64_BUILTIN_OR_AND_FETCH_DI);
7774 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
7775 IA64_BUILTIN_AND_AND_FETCH_DI);
7776 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
7777 IA64_BUILTIN_XOR_AND_FETCH_DI);
7778 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
7779 IA64_BUILTIN_NAND_AND_FETCH_DI);
7781 #undef def_builtin
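/* Hedged usage sketch (added, not in the original source): the builtins
   registered above are called directly from user code and are expanded by
   ia64_expand_builtin below instead of becoming library calls.  All names
   in the sketch are invented for the example.

	static int counter;
	static long big_counter;

	int
	example (void)
	{
	  int old = __sync_fetch_and_add_si (&counter, 1);
	  long prev = __sync_val_compare_and_swap_di (&big_counter, 0L, 42L);
	  __sync_synchronize ();
	  return old + (int) prev;
	}
*/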
7784 /* Expand fetch_and_op intrinsics. The basic code sequence is:
7787 tmp = [ptr];
7788 do {
7789 ret = tmp;
7790 ar.ccv = tmp;
7791 tmp <op>= value;
7792 cmpxchgsz.acq tmp = [ptr], tmp
7793 } while (tmp != ret)
7796 static rtx
7797 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
7798 tree arglist, rtx target)
7800 rtx ret, label, tmp, ccv, insn, mem, value;
7801 tree arg0, arg1;
7803 arg0 = TREE_VALUE (arglist);
7804 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7805 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7806 #ifdef POINTERS_EXTEND_UNSIGNED
7807 if (GET_MODE (mem) != Pmode)
7808 mem = convert_memory_address (Pmode, mem);
7809 #endif
7810 value = expand_expr (arg1, NULL_RTX, mode, 0);
7812 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7813 MEM_VOLATILE_P (mem) = 1;
7815 if (target && register_operand (target, mode))
7816 ret = target;
7817 else
7818 ret = gen_reg_rtx (mode);
7820 emit_insn (gen_mf ());
7822 /* Special case for fetchadd instructions. */
7823 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
7825 if (mode == SImode)
7826 insn = gen_fetchadd_acq_si (ret, mem, value);
7827 else
7828 insn = gen_fetchadd_acq_di (ret, mem, value);
7829 emit_insn (insn);
7830 return ret;
7833 tmp = gen_reg_rtx (mode);
7834 ccv = gen_rtx_REG (mode, AR_CCV_REGNUM);
7835 emit_move_insn (tmp, mem);
7837 label = gen_label_rtx ();
7838 emit_label (label);
7839 emit_move_insn (ret, tmp);
7840 emit_move_insn (ccv, tmp);
7842 /* Perform the specific operation.  NAND is special-cased: the caller
7843 passes one_cmpl_optab, so complement TMP first and then AND it with VALUE. */
7844 if (binoptab == one_cmpl_optab)
7846 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7847 binoptab = and_optab;
7849 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
7851 if (mode == SImode)
7852 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
7853 else
7854 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
7855 emit_insn (insn);
7857 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
7859 return ret;
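/* Hedged note (added): the fetchadd fast path above only triggers when
   VALUE satisfies fetchadd_operand, i.e. is one of the immediates the
   fetchadd instruction can encode (-16, -8, -4, -1, 1, 4, 8, 16).  With
   x and n invented for the example,

	__sync_fetch_and_add_si (&x, 1);
	__sync_fetch_and_add_si (&x, n);

   the first call becomes a single fetchadd4.acq while the second falls
   back to the cmpxchg loop.  */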
7862 /* Expand op_and_fetch intrinsics. The basic code sequence is:
7865 tmp = [ptr];
7866 do {
7867 old = tmp;
7868 ar.ccv = tmp;
7869 ret = tmp <op> value;
7870 cmpxchgsz.acq tmp = [ptr], ret
7871 } while (tmp != old)
7874 static rtx
7875 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
7876 tree arglist, rtx target)
7878 rtx old, label, tmp, ret, ccv, insn, mem, value;
7879 tree arg0, arg1;
7881 arg0 = TREE_VALUE (arglist);
7882 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7883 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7884 #ifdef POINTERS_EXTEND_UNSIGNED
7885 if (GET_MODE (mem) != Pmode)
7886 mem = convert_memory_address (Pmode, mem);
7887 #endif
7889 value = expand_expr (arg1, NULL_RTX, mode, 0);
7891 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7892 MEM_VOLATILE_P (mem) = 1;
7894 if (target && ! register_operand (target, mode))
7895 target = NULL_RTX;
7897 emit_insn (gen_mf ());
7898 tmp = gen_reg_rtx (mode);
7899 old = gen_reg_rtx (mode);
7900 ccv = gen_rtx_REG (mode, AR_CCV_REGNUM);
7902 emit_move_insn (tmp, mem);
7904 label = gen_label_rtx ();
7905 emit_label (label);
7906 emit_move_insn (old, tmp);
7907 emit_move_insn (ccv, tmp);
7909 /* Perform the specific operation.  NAND is special-cased: the caller
7910 passes one_cmpl_optab, so complement TMP first and then AND it with VALUE. */
7911 if (binoptab == one_cmpl_optab)
7913 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7914 binoptab = and_optab;
7916 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
7918 if (mode == SImode)
7919 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
7920 else
7921 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
7922 emit_insn (insn);
7924 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
7926 return ret;
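/* Hedged note (added): because NAND is expanded by complementing TMP and
   then ANDing with VALUE, a call such as

	int r = __sync_nand_and_fetch_si (&word, mask);

   (word and mask invented for the example) leaves r == ~old & mask, the
   traditional semantics of these builtins.  */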
7929 /* Expand val_ and bool_compare_and_swap. For val_ we want:
7931 ar.ccv = oldval
7933 cmpxchgsz.acq ret = [ptr], newval, ar.ccv
7934 return ret
7936 For bool_ it's the same except return ret == oldval.
7939 static rtx
7940 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
7941 int boolp, tree arglist, rtx target)
7943 tree arg0, arg1, arg2;
7944 rtx mem, old, new, ccv, tmp, insn;
7946 arg0 = TREE_VALUE (arglist);
7947 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7948 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
7949 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7950 old = expand_expr (arg1, NULL_RTX, mode, 0);
7951 new = expand_expr (arg2, NULL_RTX, mode, 0);
7953 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7954 MEM_VOLATILE_P (mem) = 1;
7956 if (! register_operand (old, mode))
7957 old = copy_to_mode_reg (mode, old);
7958 if (! register_operand (new, mode))
7959 new = copy_to_mode_reg (mode, new);
7961 if (! boolp && target && register_operand (target, mode))
7962 tmp = target;
7963 else
7964 tmp = gen_reg_rtx (mode);
7966 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7967 if (mode == DImode)
7968 emit_move_insn (ccv, old);
7969 else
7971 rtx ccvtmp = gen_reg_rtx (DImode);
7972 emit_insn (gen_zero_extendsidi2 (ccvtmp, old));
7973 emit_move_insn (ccv, ccvtmp);
7975 emit_insn (gen_mf ());
7976 if (mode == SImode)
7977 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
7978 else
7979 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
7980 emit_insn (insn);
7982 if (boolp)
7984 if (! target)
7985 target = gen_reg_rtx (rmode);
7986 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
7988 else
7989 return tmp;
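/* Hedged usage sketch (added): the val_ form returns the previous memory
   contents, while the bool_ form returns whether the exchange happened.
   With lock, seen and take_lock_fast_path invented for the example:

	static int lock;
	...
	if (__sync_bool_compare_and_swap_si (&lock, 0, 1))
	  take_lock_fast_path ();
	int seen = __sync_val_compare_and_swap_si (&lock, 0, 1);

   In the second call SEEN receives the old contents of LOCK whether or
   not the exchange took place.  */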
7992 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
7994 static rtx
7995 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
7996 rtx target)
7998 tree arg0, arg1;
7999 rtx mem, new, ret, insn;
8001 arg0 = TREE_VALUE (arglist);
8002 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8003 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8004 new = expand_expr (arg1, NULL_RTX, mode, 0);
8006 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8007 MEM_VOLATILE_P (mem) = 1;
8008 if (! register_operand (new, mode))
8009 new = copy_to_mode_reg (mode, new);
8011 if (target && register_operand (target, mode))
8012 ret = target;
8013 else
8014 ret = gen_reg_rtx (mode);
8016 if (mode == SImode)
8017 insn = gen_xchgsi (ret, mem, new);
8018 else
8019 insn = gen_xchgdi (ret, mem, new);
8020 emit_insn (insn);
8022 return ret;
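/* Hedged note (added): xchg on IA-64 only exists with acquire semantics,
   which is why no memory fence is emitted above; __sync_lock_test_and_set
   therefore acts as an acquire barrier only, matching its documented
   behaviour.  */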
8025 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
8027 static rtx
8028 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
8029 rtx target ATTRIBUTE_UNUSED)
8031 tree arg0;
8032 rtx mem;
8034 arg0 = TREE_VALUE (arglist);
8035 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8037 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8038 MEM_VOLATILE_P (mem) = 1;
8040 emit_move_insn (mem, const0_rtx);
8042 return const0_rtx;
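/* Hedged usage sketch (added): together the two expanders above provide a
   simple spin lock; all names are invented for the example.

	static int lock;

	void
	acquire (void)
	{
	  while (__sync_lock_test_and_set_si (&lock, 1))
	    continue;
	}

	void
	release (void)
	{
	  __sync_lock_release_si (&lock);
	}

   acquire spins until the xchg returns 0 (the lock was free); release is
   the st4.rel store of zero described above.  */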
8045 rtx
8046 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8047 enum machine_mode mode ATTRIBUTE_UNUSED,
8048 int ignore ATTRIBUTE_UNUSED)
8050 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8051 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8052 tree arglist = TREE_OPERAND (exp, 1);
8053 enum machine_mode rmode = VOIDmode;
8055 switch (fcode)
8057 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8058 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8059 mode = SImode;
8060 rmode = SImode;
8061 break;
8063 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8064 case IA64_BUILTIN_LOCK_RELEASE_SI:
8065 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8066 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8067 case IA64_BUILTIN_FETCH_AND_OR_SI:
8068 case IA64_BUILTIN_FETCH_AND_AND_SI:
8069 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8070 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8071 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8072 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8073 case IA64_BUILTIN_OR_AND_FETCH_SI:
8074 case IA64_BUILTIN_AND_AND_FETCH_SI:
8075 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8076 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8077 mode = SImode;
8078 break;
8080 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8081 mode = DImode;
8082 rmode = SImode;
8083 break;
8085 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8086 mode = DImode;
8087 rmode = DImode;
8088 break;
8090 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8091 case IA64_BUILTIN_LOCK_RELEASE_DI:
8092 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8093 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8094 case IA64_BUILTIN_FETCH_AND_OR_DI:
8095 case IA64_BUILTIN_FETCH_AND_AND_DI:
8096 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8097 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8098 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8099 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8100 case IA64_BUILTIN_OR_AND_FETCH_DI:
8101 case IA64_BUILTIN_AND_AND_FETCH_DI:
8102 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8103 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8104 mode = DImode;
8105 break;
8107 default:
8108 break;
8111 switch (fcode)
8113 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8114 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8115 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8116 target);
8118 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8119 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8120 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8121 target);
8123 case IA64_BUILTIN_SYNCHRONIZE:
8124 emit_insn (gen_mf ());
8125 return const0_rtx;
8127 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8128 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8129 return ia64_expand_lock_test_and_set (mode, arglist, target);
8131 case IA64_BUILTIN_LOCK_RELEASE_SI:
8132 case IA64_BUILTIN_LOCK_RELEASE_DI:
8133 return ia64_expand_lock_release (mode, arglist, target);
8135 case IA64_BUILTIN_BSP:
8136 if (! target || ! register_operand (target, DImode))
8137 target = gen_reg_rtx (DImode);
8138 emit_insn (gen_bsp_value (target));
8139 #ifdef POINTERS_EXTEND_UNSIGNED
8140 target = convert_memory_address (ptr_mode, target);
8141 #endif
8142 return target;
8144 case IA64_BUILTIN_FLUSHRS:
8145 emit_insn (gen_flushrs ());
8146 return const0_rtx;
8148 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8149 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8150 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8152 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8153 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8154 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8156 case IA64_BUILTIN_FETCH_AND_OR_SI:
8157 case IA64_BUILTIN_FETCH_AND_OR_DI:
8158 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8160 case IA64_BUILTIN_FETCH_AND_AND_SI:
8161 case IA64_BUILTIN_FETCH_AND_AND_DI:
8162 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8164 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8165 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8166 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8168 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8169 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8170 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8172 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8173 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8174 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8176 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8177 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8178 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8180 case IA64_BUILTIN_OR_AND_FETCH_SI:
8181 case IA64_BUILTIN_OR_AND_FETCH_DI:
8182 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8184 case IA64_BUILTIN_AND_AND_FETCH_SI:
8185 case IA64_BUILTIN_AND_AND_FETCH_DI:
8186 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8188 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8189 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8190 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8192 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8193 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8194 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
8196 default:
8197 break;
8200 return NULL_RTX;
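/* Hedged usage sketch (added): the two register-stack builtins handled
   above are typically used by garbage collectors and stack unwinders;
   backing_store_top is an invented name.

	__builtin_ia64_flushrs ();
	void *backing_store_top = __builtin_ia64_bsp ();

   flushrs forces dirty stacked registers out to the backing store, and
   bsp returns the current backing store pointer.  */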
8203 /* On HP-UX IA64, aggregate parameters are passed in the most
8204 significant bits of the stack slot. */
8206 enum direction
8207 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8209 /* Exception to normal case for structures/unions/etc. */
8211 if (type && AGGREGATE_TYPE_P (type)
8212 && int_size_in_bytes (type) < UNITS_PER_WORD)
8213 return upward;
8215 /* This is the standard FUNCTION_ARG_PADDING with !BYTES_BIG_ENDIAN
8216 hardwired to be true. */
8218 return ((mode == BLKmode
8219 ? (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
8220 && int_size_in_bytes (type) < (PARM_BOUNDARY / BITS_PER_UNIT))
8221 : GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
8222 ? downward : upward);
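/* Hedged worked example (added): with UNITS_PER_WORD == 8 and
   PARM_BOUNDARY == 64 on ia64, a 3-byte struct argument takes the first
   branch and is padded upward (it occupies the most significant bytes of
   its slot), while a plain int argument, being a non-BLKmode scalar
   narrower than PARM_BOUNDARY, is padded downward.  */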
8225 /* Linked list of all external functions that are to be emitted by GCC.
8226 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8227 order to avoid putting out names that are never really used. */
8229 struct extern_func_list
8231 struct extern_func_list *next; /* next external */
8232 char *name; /* name of the external */
8233 } *extern_func_head = 0;
8235 static void
8236 ia64_hpux_add_extern_decl (const char *name)
8238 struct extern_func_list *p;
8240 p = (struct extern_func_list *) xmalloc (sizeof (struct extern_func_list));
8241 p->name = xmalloc (strlen (name) + 1);
8242 strcpy (p->name, name);
8243 p->next = extern_func_head;
8244 extern_func_head = p;
8247 /* Print out the list of used global functions. */
8249 static void
8250 ia64_hpux_file_end (void)
8252 while (extern_func_head)
8254 const char *real_name;
8255 tree decl;
8257 real_name = (* targetm.strip_name_encoding) (extern_func_head->name);
8258 decl = maybe_get_identifier (real_name);
8260 if (!decl
8261 || (! TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (decl)))
8263 if (decl)
8264 TREE_ASM_WRITTEN (decl) = 1;
8265 (*targetm.asm_out.globalize_label) (asm_out_file,
8266 extern_func_head->name);
8267 fputs (TYPE_ASM_OP, asm_out_file);
8268 assemble_name (asm_out_file, extern_func_head->name);
8269 putc (',', asm_out_file);
8270 fprintf (asm_out_file, TYPE_OPERAND_FMT, "function");
8271 putc ('\n', asm_out_file);
8273 extern_func_head = extern_func_head->next;
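/* Hedged example (added): for a referenced but never-defined function foo,
   the loop above emits roughly

	.global foo
	.type	foo, @function

   with the exact spelling determined by the target's globalize_label hook,
   TYPE_ASM_OP and TYPE_OPERAND_FMT.  foo is an invented name.  */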
8278 /* Switch to the section to which we should output X. The only thing
8279 special we do here is to honor small data. */
8281 static void
8282 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8283 unsigned HOST_WIDE_INT align)
8285 if (GET_MODE_SIZE (mode) > 0
8286 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8287 sdata_section ();
8288 else
8289 default_elf_select_rtx_section (mode, x, align);
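/* Hedged note (added): constants no larger than ia64_section_threshold
   (the -G value, believed to default to 8 bytes here) are placed in .sdata
   so they can be reached with cheap gp-relative addressing; everything
   else falls through to the default ELF constant sections.  */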
8292 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8293 Pretend flag_pic is always set. */
8295 static void
8296 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8298 default_elf_select_section_1 (exp, reloc, align, true);
8301 static void
8302 ia64_rwreloc_unique_section (tree decl, int reloc)
8304 default_unique_section_1 (decl, reloc, true);
8307 static void
8308 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8309 unsigned HOST_WIDE_INT align)
8311 int save_pic = flag_pic;
8312 flag_pic = 1;
8313 ia64_select_rtx_section (mode, x, align);
8314 flag_pic = save_pic;
8317 static unsigned int
8318 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8320 return default_section_type_flags_1 (decl, name, reloc, true);
8324 /* Output the assembler code for a thunk function. THUNK_DECL is the
8325 declaration for the thunk function itself, FUNCTION is the decl for
8326 the target function. DELTA is an immediate constant offset to be
8327 added to THIS. If VCALL_OFFSET is nonzero, the word at
8328 *(*this + vcall_offset) should be added to THIS. */
8330 static void
8331 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8332 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8333 tree function)
8335 rtx this, insn, funexp;
8337 reload_completed = 1;
8338 epilogue_completed = 1;
8339 no_new_pseudos = 1;
8341 /* Set things up as ia64_expand_prologue might. */
8342 last_scratch_gr_reg = 15;
8344 memset (&current_frame_info, 0, sizeof (current_frame_info));
8345 current_frame_info.spill_cfa_off = -16;
8346 current_frame_info.n_input_regs = 1;
8347 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8349 if (!TARGET_REG_NAMES)
8350 reg_names[IN_REG (0)] = ia64_reg_numbers[0];
8352 /* Mark the end of the (empty) prologue. */
8353 emit_note (NOTE_INSN_PROLOGUE_END);
8355 this = gen_rtx_REG (Pmode, IN_REG (0));
8356 if (TARGET_ILP32)
8358 rtx tmp = gen_rtx_REG (ptr_mode, IN_REG (0));
8359 REG_POINTER (tmp) = 1;
8360 if (delta && CONST_OK_FOR_I (delta))
8362 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8363 delta = 0;
8365 else
8366 emit_insn (gen_ptr_extend (this, tmp));
8369 /* Apply the constant offset, if required. */
8370 if (delta)
8372 rtx delta_rtx = GEN_INT (delta);
8374 if (!CONST_OK_FOR_I (delta))
8376 rtx tmp = gen_rtx_REG (Pmode, 2);
8377 emit_move_insn (tmp, delta_rtx);
8378 delta_rtx = tmp;
8380 emit_insn (gen_adddi3 (this, this, delta_rtx));
8383 /* Apply the offset from the vtable, if required. */
8384 if (vcall_offset)
8386 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8387 rtx tmp = gen_rtx_REG (Pmode, 2);
8389 if (TARGET_ILP32)
8391 rtx t = gen_rtx_REG (ptr_mode, 2);
8392 REG_POINTER (t) = 1;
8393 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8394 if (CONST_OK_FOR_I (vcall_offset))
8396 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8397 vcall_offset_rtx));
8398 vcall_offset = 0;
8400 else
8401 emit_insn (gen_ptr_extend (tmp, t));
8403 else
8404 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8406 if (vcall_offset)
8408 if (!CONST_OK_FOR_J (vcall_offset))
8410 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8411 emit_move_insn (tmp2, vcall_offset_rtx);
8412 vcall_offset_rtx = tmp2;
8414 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8417 if (TARGET_ILP32)
8418 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8419 gen_rtx_MEM (ptr_mode, tmp));
8420 else
8421 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8423 emit_insn (gen_adddi3 (this, this, tmp));
8426 /* Generate a tail call to the target function. */
8427 if (! TREE_USED (function))
8429 assemble_external (function);
8430 TREE_USED (function) = 1;
8432 funexp = XEXP (DECL_RTL (function), 0);
8433 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8434 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8435 insn = get_last_insn ();
8436 SIBLING_CALL_P (insn) = 1;
8438 /* Code generation for calls relies on splitting. */
8439 reload_completed = 1;
8440 epilogue_completed = 1;
8441 try_split (PATTERN (insn), insn, 0);
8443 emit_barrier ();
8445 /* Run just enough of rest_of_compilation to get the insns emitted.
8446 There's not really enough bulk here to make other passes such as
8447 instruction scheduling worthwhile. Note that use_thunk calls
8448 assemble_start_function and assemble_end_function. */
8450 insn_locators_initialize ();
8451 emit_all_insn_group_barriers (NULL);
8452 insn = get_insns ();
8453 shorten_branches (insn);
8454 final_start_function (insn, file, 1);
8455 final (insn, file, 1, 0);
8456 final_end_function ();
8458 reload_completed = 0;
8459 epilogue_completed = 0;
8460 no_new_pseudos = 0;
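/* Hedged C-level sketch (added) of the adjustment the emitted thunk
   performs before tail-calling FUNCTION; the casts and types are purely
   illustrative.

	this = (char *) this + delta;
	if (vcall_offset)
	  this = (char *) this
		 + *(long *) (*(char **) this + vcall_offset);
	return function (this, ...);

   The final call is emitted as a sibling call, so the thunk itself never
   returns through its own frame.  */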
8463 #include "gt-ia64.h"