PR c/81544 - attribute noreturn and warn_unused_result on the same function accepted
[official-gcc.git] / gcc / config / pa / pa.c
blob5d28ecda2edffac2ff2cbd9d017ac9e2d499f66d
1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992-2017 Free Software Foundation, Inc.
3 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "memmodel.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "df.h"
30 #include "tm_p.h"
31 #include "stringpool.h"
32 #include "attribs.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "diagnostic-core.h"
38 #include "insn-attr.h"
39 #include "alias.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "varasm.h"
43 #include "calls.h"
44 #include "output.h"
45 #include "except.h"
46 #include "explow.h"
47 #include "expr.h"
48 #include "reload.h"
49 #include "common/common-target.h"
50 #include "langhooks.h"
51 #include "cfgrtl.h"
52 #include "opts.h"
53 #include "builtins.h"
55 /* This file should be included last. */
56 #include "target-def.h"
58 /* Return nonzero if there is a bypass for the output of
59 OUT_INSN and the fp store IN_INSN. */
60 int
61 pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
63 machine_mode store_mode;
64 machine_mode other_mode;
65 rtx set;
67 if (recog_memoized (in_insn) < 0
68 || (get_attr_type (in_insn) != TYPE_FPSTORE
69 && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
70 || recog_memoized (out_insn) < 0)
71 return 0;
73 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
75 set = single_set (out_insn);
76 if (!set)
77 return 0;
79 other_mode = GET_MODE (SET_SRC (set));
81 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
85 #ifndef DO_FRAME_NOTES
86 #ifdef INCOMING_RETURN_ADDR_RTX
87 #define DO_FRAME_NOTES 1
88 #else
89 #define DO_FRAME_NOTES 0
90 #endif
91 #endif
93 static void pa_option_override (void);
94 static void copy_reg_pointer (rtx, rtx);
95 static void fix_range (const char *);
96 static int hppa_register_move_cost (machine_mode mode, reg_class_t,
97 reg_class_t);
98 static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
99 static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
100 static inline rtx force_mode (machine_mode, rtx);
101 static void pa_reorg (void);
102 static void pa_combine_instructions (void);
103 static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
104 rtx, rtx);
105 static bool forward_branch_p (rtx_insn *);
106 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
107 static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
108 static int compute_movmem_length (rtx_insn *);
109 static int compute_clrmem_length (rtx_insn *);
110 static bool pa_assemble_integer (rtx, unsigned int, int);
111 static void remove_useless_addtr_insns (int);
112 static void store_reg (int, HOST_WIDE_INT, int);
113 static void store_reg_modify (int, int, HOST_WIDE_INT);
114 static void load_reg (int, HOST_WIDE_INT, int);
115 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
116 static rtx pa_function_value (const_tree, const_tree, bool);
117 static rtx pa_libcall_value (machine_mode, const_rtx);
118 static bool pa_function_value_regno_p (const unsigned int);
119 static void pa_output_function_prologue (FILE *);
120 static void update_total_code_bytes (unsigned int);
121 static void pa_output_function_epilogue (FILE *);
122 static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
123 static int pa_adjust_priority (rtx_insn *, int);
124 static int pa_issue_rate (void);
125 static int pa_reloc_rw_mask (void);
126 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
127 static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
128 static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
129 ATTRIBUTE_UNUSED;
130 static void pa_encode_section_info (tree, rtx, int);
131 static const char *pa_strip_name_encoding (const char *);
132 static bool pa_function_ok_for_sibcall (tree, tree);
133 static void pa_globalize_label (FILE *, const char *)
134 ATTRIBUTE_UNUSED;
135 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
136 HOST_WIDE_INT, tree);
137 #if !defined(USE_COLLECT2)
138 static void pa_asm_out_constructor (rtx, int);
139 static void pa_asm_out_destructor (rtx, int);
140 #endif
141 static void pa_init_builtins (void);
142 static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
143 static rtx hppa_builtin_saveregs (void);
144 static void hppa_va_start (tree, rtx);
145 static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
146 static bool pa_scalar_mode_supported_p (scalar_mode);
147 static bool pa_commutative_p (const_rtx x, int outer_code);
148 static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
149 static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
150 static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
151 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
152 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
153 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
154 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
155 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
156 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
157 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
158 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
159 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
160 static void output_deferred_plabels (void);
161 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
162 static void pa_file_end (void);
163 static void pa_init_libfuncs (void);
164 static rtx pa_struct_value_rtx (tree, int);
165 static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
166 const_tree, bool);
167 static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
168 tree, bool);
169 static void pa_function_arg_advance (cumulative_args_t, machine_mode,
170 const_tree, bool);
171 static rtx pa_function_arg (cumulative_args_t, machine_mode,
172 const_tree, bool);
173 static pad_direction pa_function_arg_padding (machine_mode, const_tree);
174 static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
175 static struct machine_function * pa_init_machine_status (void);
176 static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
177 machine_mode,
178 secondary_reload_info *);
179 static bool pa_secondary_memory_needed (machine_mode,
180 reg_class_t, reg_class_t);
181 static void pa_extra_live_on_entry (bitmap);
182 static machine_mode pa_promote_function_mode (const_tree,
183 machine_mode, int *,
184 const_tree, int);
186 static void pa_asm_trampoline_template (FILE *);
187 static void pa_trampoline_init (rtx, tree, rtx);
188 static rtx pa_trampoline_adjust_address (rtx);
189 static rtx pa_delegitimize_address (rtx);
190 static bool pa_print_operand_punct_valid_p (unsigned char);
191 static rtx pa_internal_arg_pointer (void);
192 static bool pa_can_eliminate (const int, const int);
193 static void pa_conditional_register_usage (void);
194 static machine_mode pa_c_mode_for_suffix (char);
195 static section *pa_function_section (tree, enum node_frequency, bool, bool);
196 static bool pa_cannot_force_const_mem (machine_mode, rtx);
197 static bool pa_legitimate_constant_p (machine_mode, rtx);
198 static unsigned int pa_section_type_flags (tree, const char *, int);
199 static bool pa_legitimate_address_p (machine_mode, rtx, bool);
200 static bool pa_callee_copies (cumulative_args_t, machine_mode,
201 const_tree, bool);
202 static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
203 static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
204 static bool pa_modes_tieable_p (machine_mode, machine_mode);
205 static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
206 static HOST_WIDE_INT pa_starting_frame_offset (void);
208 /* The following extra sections are only used for SOM. */
209 static GTY(()) section *som_readonly_data_section;
210 static GTY(()) section *som_one_only_readonly_data_section;
211 static GTY(()) section *som_one_only_data_section;
212 static GTY(()) section *som_tm_clone_table_section;
214 /* Counts for the number of callee-saved general and floating point
215 registers which were saved by the current function's prologue. */
216 static int gr_saved, fr_saved;
218 /* Boolean indicating whether the return pointer was saved by the
219 current function's prologue. */
220 static bool rp_saved;
222 static rtx find_addr_reg (rtx);
224 /* Keep track of the number of bytes we have output in the CODE subspace
225 during this compilation so we'll know when to emit inline long-calls. */
226 unsigned long total_code_bytes;
228 /* The last address of the previous function plus the number of bytes in
229 associated thunks that have been output. This is used to determine if
230 a thunk can use an IA-relative branch to reach its target function. */
231 static unsigned int last_address;
233 /* Variables to handle plabels that we discover are necessary at assembly
234 output time. They are output after the current function. */
235 struct GTY(()) deferred_plabel
237 rtx internal_label;
238 rtx symbol;
240 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
241 deferred_plabels;
242 static size_t n_deferred_plabels = 0;
244 /* Initialize the GCC target structure. */
246 #undef TARGET_OPTION_OVERRIDE
247 #define TARGET_OPTION_OVERRIDE pa_option_override
249 #undef TARGET_ASM_ALIGNED_HI_OP
250 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
251 #undef TARGET_ASM_ALIGNED_SI_OP
252 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
253 #undef TARGET_ASM_ALIGNED_DI_OP
254 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
255 #undef TARGET_ASM_UNALIGNED_HI_OP
256 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
257 #undef TARGET_ASM_UNALIGNED_SI_OP
258 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
259 #undef TARGET_ASM_UNALIGNED_DI_OP
260 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
261 #undef TARGET_ASM_INTEGER
262 #define TARGET_ASM_INTEGER pa_assemble_integer
264 #undef TARGET_ASM_FUNCTION_PROLOGUE
265 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
266 #undef TARGET_ASM_FUNCTION_EPILOGUE
267 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
269 #undef TARGET_FUNCTION_VALUE
270 #define TARGET_FUNCTION_VALUE pa_function_value
271 #undef TARGET_LIBCALL_VALUE
272 #define TARGET_LIBCALL_VALUE pa_libcall_value
273 #undef TARGET_FUNCTION_VALUE_REGNO_P
274 #define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p
276 #undef TARGET_LEGITIMIZE_ADDRESS
277 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
279 #undef TARGET_SCHED_ADJUST_COST
280 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
281 #undef TARGET_SCHED_ADJUST_PRIORITY
282 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
283 #undef TARGET_SCHED_ISSUE_RATE
284 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
286 #undef TARGET_ENCODE_SECTION_INFO
287 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
288 #undef TARGET_STRIP_NAME_ENCODING
289 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
291 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
292 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
294 #undef TARGET_COMMUTATIVE_P
295 #define TARGET_COMMUTATIVE_P pa_commutative_p
297 #undef TARGET_ASM_OUTPUT_MI_THUNK
298 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
299 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
300 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
302 #undef TARGET_ASM_FILE_END
303 #define TARGET_ASM_FILE_END pa_file_end
305 #undef TARGET_ASM_RELOC_RW_MASK
306 #define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask
308 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
309 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p
311 #if !defined(USE_COLLECT2)
312 #undef TARGET_ASM_CONSTRUCTOR
313 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
314 #undef TARGET_ASM_DESTRUCTOR
315 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
316 #endif
318 #undef TARGET_INIT_BUILTINS
319 #define TARGET_INIT_BUILTINS pa_init_builtins
321 #undef TARGET_EXPAND_BUILTIN
322 #define TARGET_EXPAND_BUILTIN pa_expand_builtin
324 #undef TARGET_REGISTER_MOVE_COST
325 #define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
326 #undef TARGET_RTX_COSTS
327 #define TARGET_RTX_COSTS hppa_rtx_costs
328 #undef TARGET_ADDRESS_COST
329 #define TARGET_ADDRESS_COST hppa_address_cost
331 #undef TARGET_MACHINE_DEPENDENT_REORG
332 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
334 #undef TARGET_INIT_LIBFUNCS
335 #define TARGET_INIT_LIBFUNCS pa_init_libfuncs
337 #undef TARGET_PROMOTE_FUNCTION_MODE
338 #define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
339 #undef TARGET_PROMOTE_PROTOTYPES
340 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
342 #undef TARGET_STRUCT_VALUE_RTX
343 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
344 #undef TARGET_RETURN_IN_MEMORY
345 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
346 #undef TARGET_MUST_PASS_IN_STACK
347 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
348 #undef TARGET_PASS_BY_REFERENCE
349 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
350 #undef TARGET_CALLEE_COPIES
351 #define TARGET_CALLEE_COPIES pa_callee_copies
352 #undef TARGET_ARG_PARTIAL_BYTES
353 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
354 #undef TARGET_FUNCTION_ARG
355 #define TARGET_FUNCTION_ARG pa_function_arg
356 #undef TARGET_FUNCTION_ARG_ADVANCE
357 #define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
358 #undef TARGET_FUNCTION_ARG_PADDING
359 #define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
360 #undef TARGET_FUNCTION_ARG_BOUNDARY
361 #define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
363 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
364 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
365 #undef TARGET_EXPAND_BUILTIN_VA_START
366 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
367 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
368 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
370 #undef TARGET_SCALAR_MODE_SUPPORTED_P
371 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
373 #undef TARGET_CANNOT_FORCE_CONST_MEM
374 #define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem
376 #undef TARGET_SECONDARY_RELOAD
377 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
378 #undef TARGET_SECONDARY_MEMORY_NEEDED
379 #define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed
381 #undef TARGET_EXTRA_LIVE_ON_ENTRY
382 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
384 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
385 #define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
386 #undef TARGET_TRAMPOLINE_INIT
387 #define TARGET_TRAMPOLINE_INIT pa_trampoline_init
388 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
389 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
390 #undef TARGET_DELEGITIMIZE_ADDRESS
391 #define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
392 #undef TARGET_INTERNAL_ARG_POINTER
393 #define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
394 #undef TARGET_CAN_ELIMINATE
395 #define TARGET_CAN_ELIMINATE pa_can_eliminate
396 #undef TARGET_CONDITIONAL_REGISTER_USAGE
397 #define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
398 #undef TARGET_C_MODE_FOR_SUFFIX
399 #define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
400 #undef TARGET_ASM_FUNCTION_SECTION
401 #define TARGET_ASM_FUNCTION_SECTION pa_function_section
403 #undef TARGET_LEGITIMATE_CONSTANT_P
404 #define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
405 #undef TARGET_SECTION_TYPE_FLAGS
406 #define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
407 #undef TARGET_LEGITIMATE_ADDRESS_P
408 #define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p
410 #undef TARGET_LRA_P
411 #define TARGET_LRA_P hook_bool_void_false
413 #undef TARGET_HARD_REGNO_NREGS
414 #define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
415 #undef TARGET_HARD_REGNO_MODE_OK
416 #define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
417 #undef TARGET_MODES_TIEABLE_P
418 #define TARGET_MODES_TIEABLE_P pa_modes_tieable_p
420 #undef TARGET_CAN_CHANGE_MODE_CLASS
421 #define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class
423 #undef TARGET_CONSTANT_ALIGNMENT
424 #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
426 #undef TARGET_STARTING_FRAME_OFFSET
427 #define TARGET_STARTING_FRAME_OFFSET pa_starting_frame_offset
429 struct gcc_target targetm = TARGET_INITIALIZER;
431 /* Parse the -mfixed-range= option string. */
433 static void
434 fix_range (const char *const_str)
436 int i, first, last;
437 char *str, *dash, *comma;
439 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
440 REG2 are either register names or register numbers. The effect
441 of this option is to mark the registers in the range from REG1 to
442 REG2 as ``fixed'' so they won't be used by the compiler. This is
443 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
445 i = strlen (const_str);
446 str = (char *) alloca (i + 1);
447 memcpy (str, const_str, i + 1);
449 while (1)
451 dash = strchr (str, '-');
452 if (!dash)
454 warning (0, "value of -mfixed-range must have form REG1-REG2");
455 return;
457 *dash = '\0';
459 comma = strchr (dash + 1, ',');
460 if (comma)
461 *comma = '\0';
463 first = decode_reg_name (str);
464 if (first < 0)
466 warning (0, "unknown register name: %s", str);
467 return;
470 last = decode_reg_name (dash + 1);
471 if (last < 0)
473 warning (0, "unknown register name: %s", dash + 1);
474 return;
477 *dash = '-';
479 if (first > last)
481 warning (0, "%s-%s is an empty range", str, dash + 1);
482 return;
485 for (i = first; i <= last; ++i)
486 fixed_regs[i] = call_used_regs[i] = 1;
488 if (!comma)
489 break;
491 *comma = ',';
492 str = comma + 1;
495 /* Check if all floating point registers have been fixed. */
496 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
497 if (!fixed_regs[i])
498 break;
500 if (i > FP_REG_LAST)
501 target_flags |= MASK_DISABLE_FPREGS;
504 /* Implement the TARGET_OPTION_OVERRIDE hook. */
506 static void
507 pa_option_override (void)
509 unsigned int i;
510 cl_deferred_option *opt;
511 vec<cl_deferred_option> *v
512 = (vec<cl_deferred_option> *) pa_deferred_options;
514 if (v)
515 FOR_EACH_VEC_ELT (*v, i, opt)
517 switch (opt->opt_index)
519 case OPT_mfixed_range_:
520 fix_range (opt->arg);
521 break;
523 default:
524 gcc_unreachable ();
528 if (flag_pic && TARGET_PORTABLE_RUNTIME)
530 warning (0, "PIC code generation is not supported in the portable runtime model");
533 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
535 warning (0, "PIC code generation is not compatible with fast indirect calls");
538 if (! TARGET_GAS && write_symbols != NO_DEBUG)
540 warning (0, "-g is only supported when using GAS on this processor,");
541 warning (0, "-g option disabled");
542 write_symbols = NO_DEBUG;
545 /* We only support the "big PIC" model now. And we always generate PIC
546 code when in 64bit mode. */
547 if (flag_pic == 1 || TARGET_64BIT)
548 flag_pic = 2;
550 /* Disable -freorder-blocks-and-partition as we don't support hot and
551 cold partitioning. */
552 if (flag_reorder_blocks_and_partition)
554 inform (input_location,
555 "-freorder-blocks-and-partition does not work "
556 "on this architecture");
557 flag_reorder_blocks_and_partition = 0;
558 flag_reorder_blocks = 1;
561 /* We can't guarantee that .dword is available for 32-bit targets. */
562 if (UNITS_PER_WORD == 4)
563 targetm.asm_out.aligned_op.di = NULL;
565 /* The unaligned ops are only available when using GAS. */
566 if (!TARGET_GAS)
568 targetm.asm_out.unaligned_op.hi = NULL;
569 targetm.asm_out.unaligned_op.si = NULL;
570 targetm.asm_out.unaligned_op.di = NULL;
573 init_machine_status = pa_init_machine_status;
576 enum pa_builtins
578 PA_BUILTIN_COPYSIGNQ,
579 PA_BUILTIN_FABSQ,
580 PA_BUILTIN_INFQ,
581 PA_BUILTIN_HUGE_VALQ,
582 PA_BUILTIN_max
585 static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
587 static void
588 pa_init_builtins (void)
590 #ifdef DONT_HAVE_FPUTC_UNLOCKED
592 tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
593 set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
594 builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
596 #endif
597 #if TARGET_HPUX_11
599 tree decl;
601 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
602 set_user_assembler_name (decl, "_Isfinite");
603 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
604 set_user_assembler_name (decl, "_Isfinitef");
606 #endif
608 if (HPUX_LONG_DOUBLE_LIBRARY)
610 tree decl, ftype;
612 /* Under HPUX, the __float128 type is a synonym for "long double". */
613 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
614 "__float128");
616 /* TFmode support builtins. */
617 ftype = build_function_type_list (long_double_type_node,
618 long_double_type_node,
619 NULL_TREE);
620 decl = add_builtin_function ("__builtin_fabsq", ftype,
621 PA_BUILTIN_FABSQ, BUILT_IN_MD,
622 "_U_Qfabs", NULL_TREE);
623 TREE_READONLY (decl) = 1;
624 pa_builtins[PA_BUILTIN_FABSQ] = decl;
626 ftype = build_function_type_list (long_double_type_node,
627 long_double_type_node,
628 long_double_type_node,
629 NULL_TREE);
630 decl = add_builtin_function ("__builtin_copysignq", ftype,
631 PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
632 "_U_Qfcopysign", NULL_TREE);
633 TREE_READONLY (decl) = 1;
634 pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;
636 ftype = build_function_type_list (long_double_type_node, NULL_TREE);
637 decl = add_builtin_function ("__builtin_infq", ftype,
638 PA_BUILTIN_INFQ, BUILT_IN_MD,
639 NULL, NULL_TREE);
640 pa_builtins[PA_BUILTIN_INFQ] = decl;
642 decl = add_builtin_function ("__builtin_huge_valq", ftype,
643 PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
644 NULL, NULL_TREE);
645 pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
649 static rtx
650 pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
651 machine_mode mode ATTRIBUTE_UNUSED,
652 int ignore ATTRIBUTE_UNUSED)
654 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
655 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
657 switch (fcode)
659 case PA_BUILTIN_FABSQ:
660 case PA_BUILTIN_COPYSIGNQ:
661 return expand_call (exp, target, ignore);
663 case PA_BUILTIN_INFQ:
664 case PA_BUILTIN_HUGE_VALQ:
666 machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
667 REAL_VALUE_TYPE inf;
668 rtx tmp;
670 real_inf (&inf);
671 tmp = const_double_from_real_value (inf, target_mode);
673 tmp = validize_mem (force_const_mem (target_mode, tmp));
675 if (target == 0)
676 target = gen_reg_rtx (target_mode);
678 emit_move_insn (target, tmp);
679 return target;
682 default:
683 gcc_unreachable ();
686 return NULL_RTX;
689 /* Function to init struct machine_function.
690 This will be called, via a pointer variable,
691 from push_function_context. */
693 static struct machine_function *
694 pa_init_machine_status (void)
696 return ggc_cleared_alloc<machine_function> ();
699 /* If FROM is a probable pointer register, mark TO as a probable
700 pointer register with the same pointer alignment as FROM. */
702 static void
703 copy_reg_pointer (rtx to, rtx from)
705 if (REG_POINTER (from))
706 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
709 /* Return 1 if X contains a symbolic expression. We know these
710 expressions will have one of a few well defined forms, so
711 we need only check those forms. */
713 pa_symbolic_expression_p (rtx x)
716 /* Strip off any HIGH. */
717 if (GET_CODE (x) == HIGH)
718 x = XEXP (x, 0);
720 return symbolic_operand (x, VOIDmode);
723 /* Accept any constant that can be moved in one instruction into a
724 general register. */
726 pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
728 /* OK if ldo, ldil, or zdepi, can be used. */
729 return (VAL_14_BITS_P (ival)
730 || pa_ldil_cint_p (ival)
731 || pa_zdepi_cint_p (ival));
734 /* True iff ldil can be used to load this CONST_INT. The least
735 significant 11 bits of the value must be zero and the value must
736 not change sign when extended from 32 to 64 bits. */
738 pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
740 unsigned HOST_WIDE_INT x;
742 x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
743 return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
746 /* True iff zdepi can be used to generate this CONST_INT.
747 zdepi first sign extends a 5-bit signed number to a given field
748 length, then places this field anywhere in a zero. */
750 pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
752 unsigned HOST_WIDE_INT lsb_mask, t;
754 /* This might not be obvious, but it's at least fast.
755 This function is critical; we don't have the time loops would take. */
756 lsb_mask = x & -x;
757 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
758 /* Return true iff t is a power of two. */
759 return ((t & (t - 1)) == 0);
762 /* True iff depi or extru can be used to compute (reg & mask).
763 Accept bit pattern like these:
764 0....01....1
765 1....10....0
766 1..10..01..1 */
768 pa_and_mask_p (unsigned HOST_WIDE_INT mask)
770 mask = ~mask;
771 mask += mask & -mask;
772 return (mask & (mask - 1)) == 0;
775 /* True iff depi can be used to compute (reg | MASK). */
777 pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
779 mask += mask & -mask;
780 return (mask & (mask - 1)) == 0;
783 /* Legitimize PIC addresses. If the address is already
784 position-independent, we return ORIG. Newly generated
785 position-independent addresses go to REG. If we need more
786 than one register, we lose. */
788 static rtx
789 legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
791 rtx pic_ref = orig;
793 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
795 /* Labels need special handling. */
796 if (pic_label_operand (orig, mode))
798 rtx_insn *insn;
800 /* We do not want to go through the movXX expanders here since that
801 would create recursion.
803 Nor do we really want to call a generator for a named pattern
804 since that requires multiple patterns if we want to support
805 multiple word sizes.
807 So instead we just emit the raw set, which avoids the movXX
808 expanders completely. */
809 mark_reg_pointer (reg, BITS_PER_UNIT);
810 insn = emit_insn (gen_rtx_SET (reg, orig));
812 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
813 add_reg_note (insn, REG_EQUAL, orig);
815 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
816 and update LABEL_NUSES because this is not done automatically. */
817 if (reload_in_progress || reload_completed)
819 /* Extract LABEL_REF. */
820 if (GET_CODE (orig) == CONST)
821 orig = XEXP (XEXP (orig, 0), 0);
822 /* Extract CODE_LABEL. */
823 orig = XEXP (orig, 0);
824 add_reg_note (insn, REG_LABEL_OPERAND, orig);
825 /* Make sure we have label and not a note. */
826 if (LABEL_P (orig))
827 LABEL_NUSES (orig)++;
829 crtl->uses_pic_offset_table = 1;
830 return reg;
832 if (GET_CODE (orig) == SYMBOL_REF)
834 rtx_insn *insn;
835 rtx tmp_reg;
837 gcc_assert (reg);
839 /* Before reload, allocate a temporary register for the intermediate
840 result. This allows the sequence to be deleted when the final
841 result is unused and the insns are trivially dead. */
842 tmp_reg = ((reload_in_progress || reload_completed)
843 ? reg : gen_reg_rtx (Pmode));
845 if (function_label_operand (orig, VOIDmode))
847 /* Force function label into memory in word mode. */
848 orig = XEXP (force_const_mem (word_mode, orig), 0);
849 /* Load plabel address from DLT. */
850 emit_move_insn (tmp_reg,
851 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
852 gen_rtx_HIGH (word_mode, orig)));
853 pic_ref
854 = gen_const_mem (Pmode,
855 gen_rtx_LO_SUM (Pmode, tmp_reg,
856 gen_rtx_UNSPEC (Pmode,
857 gen_rtvec (1, orig),
858 UNSPEC_DLTIND14R)));
859 emit_move_insn (reg, pic_ref);
860 /* Now load address of function descriptor. */
861 pic_ref = gen_rtx_MEM (Pmode, reg);
863 else
865 /* Load symbol reference from DLT. */
866 emit_move_insn (tmp_reg,
867 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
868 gen_rtx_HIGH (word_mode, orig)));
869 pic_ref
870 = gen_const_mem (Pmode,
871 gen_rtx_LO_SUM (Pmode, tmp_reg,
872 gen_rtx_UNSPEC (Pmode,
873 gen_rtvec (1, orig),
874 UNSPEC_DLTIND14R)));
877 crtl->uses_pic_offset_table = 1;
878 mark_reg_pointer (reg, BITS_PER_UNIT);
879 insn = emit_move_insn (reg, pic_ref);
881 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
882 set_unique_reg_note (insn, REG_EQUAL, orig);
884 return reg;
886 else if (GET_CODE (orig) == CONST)
888 rtx base;
890 if (GET_CODE (XEXP (orig, 0)) == PLUS
891 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
892 return orig;
894 gcc_assert (reg);
895 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
897 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
898 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
899 base == reg ? 0 : reg);
901 if (GET_CODE (orig) == CONST_INT)
903 if (INT_14_BITS (orig))
904 return plus_constant (Pmode, base, INTVAL (orig));
905 orig = force_reg (Pmode, orig);
907 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
908 /* Likewise, should we set special REG_NOTEs here? */
911 return pic_ref;
914 static GTY(()) rtx gen_tls_tga;
916 static rtx
917 gen_tls_get_addr (void)
919 if (!gen_tls_tga)
920 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
921 return gen_tls_tga;
924 static rtx
925 hppa_tls_call (rtx arg)
927 rtx ret;
929 ret = gen_reg_rtx (Pmode);
930 emit_library_call_value (gen_tls_get_addr (), ret,
931 LCT_CONST, Pmode, arg, Pmode);
933 return ret;
936 static rtx
937 legitimize_tls_address (rtx addr)
939 rtx ret, tmp, t1, t2, tp;
940 rtx_insn *insn;
942 /* Currently, we can't handle anything but a SYMBOL_REF. */
943 if (GET_CODE (addr) != SYMBOL_REF)
944 return addr;
946 switch (SYMBOL_REF_TLS_MODEL (addr))
948 case TLS_MODEL_GLOBAL_DYNAMIC:
949 tmp = gen_reg_rtx (Pmode);
950 if (flag_pic)
951 emit_insn (gen_tgd_load_pic (tmp, addr));
952 else
953 emit_insn (gen_tgd_load (tmp, addr));
954 ret = hppa_tls_call (tmp);
955 break;
957 case TLS_MODEL_LOCAL_DYNAMIC:
958 ret = gen_reg_rtx (Pmode);
959 tmp = gen_reg_rtx (Pmode);
960 start_sequence ();
961 if (flag_pic)
962 emit_insn (gen_tld_load_pic (tmp, addr));
963 else
964 emit_insn (gen_tld_load (tmp, addr));
965 t1 = hppa_tls_call (tmp);
966 insn = get_insns ();
967 end_sequence ();
968 t2 = gen_reg_rtx (Pmode);
969 emit_libcall_block (insn, t2, t1,
970 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
971 UNSPEC_TLSLDBASE));
972 emit_insn (gen_tld_offset_load (ret, addr, t2));
973 break;
975 case TLS_MODEL_INITIAL_EXEC:
976 tp = gen_reg_rtx (Pmode);
977 tmp = gen_reg_rtx (Pmode);
978 ret = gen_reg_rtx (Pmode);
979 emit_insn (gen_tp_load (tp));
980 if (flag_pic)
981 emit_insn (gen_tie_load_pic (tmp, addr));
982 else
983 emit_insn (gen_tie_load (tmp, addr));
984 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
985 break;
987 case TLS_MODEL_LOCAL_EXEC:
988 tp = gen_reg_rtx (Pmode);
989 ret = gen_reg_rtx (Pmode);
990 emit_insn (gen_tp_load (tp));
991 emit_insn (gen_tle_load (ret, addr, tp));
992 break;
994 default:
995 gcc_unreachable ();
998 return ret;
1001 /* Helper for hppa_legitimize_address. Given X, return true if it
1002 is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.
1004 This respectively represent canonical shift-add rtxs or scaled
1005 memory addresses. */
1006 static bool
1007 mem_shadd_or_shadd_rtx_p (rtx x)
1009 return ((GET_CODE (x) == ASHIFT
1010 || GET_CODE (x) == MULT)
1011 && GET_CODE (XEXP (x, 1)) == CONST_INT
1012 && ((GET_CODE (x) == ASHIFT
1013 && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
1014 || (GET_CODE (x) == MULT
1015 && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
1018 /* Try machine-dependent ways of modifying an illegitimate address
1019 to be legitimate. If we find one, return the new, valid address.
1020 This macro is used in only one place: `memory_address' in explow.c.
1022 OLDX is the address as it was before break_out_memory_refs was called.
1023 In some cases it is useful to look at this to decide what needs to be done.
1025 It is always safe for this macro to do nothing. It exists to recognize
1026 opportunities to optimize the output.
1028 For the PA, transform:
1030 memory(X + <large int>)
1032 into:
1034 if (<large int> & mask) >= 16
1035 Y = (<large int> & ~mask) + mask + 1 Round up.
1036 else
1037 Y = (<large int> & ~mask) Round down.
1038 Z = X + Y
1039 memory (Z + (<large int> - Y));
1041 This is for CSE to find several similar references, and only use one Z.
1043 X can either be a SYMBOL_REF or REG, but because combine cannot
1044 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
1045 D will not fit in 14 bits.
1047 MODE_FLOAT references allow displacements which fit in 5 bits, so use
1048 0x1f as the mask.
1050 MODE_INT references allow displacements which fit in 14 bits, so use
1051 0x3fff as the mask.
1053 This relies on the fact that most mode MODE_FLOAT references will use FP
1054 registers and most mode MODE_INT references will use integer registers.
1055 (In the rare case of an FP register used in an integer MODE, we depend
1056 on secondary reloads to clean things up.)
1059 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1060 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
1061 addressing modes to be used).
1063 Note that the addresses passed into hppa_legitimize_address always
1064 come from a MEM, so we only have to match the MULT form on incoming
1065 addresses. But to be future proof we also match the ASHIFT form.
1067 However, this routine always places those shift-add sequences into
1068 registers, so we have to generate the ASHIFT form as our output.
1070 Put X and Z into registers. Then put the entire expression into
1071 a register. */
1074 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1075 machine_mode mode)
1077 rtx orig = x;
1079 /* We need to canonicalize the order of operands in unscaled indexed
1080 addresses since the code that checks if an address is valid doesn't
1081 always try both orders. */
1082 if (!TARGET_NO_SPACE_REGS
1083 && GET_CODE (x) == PLUS
1084 && GET_MODE (x) == Pmode
1085 && REG_P (XEXP (x, 0))
1086 && REG_P (XEXP (x, 1))
1087 && REG_POINTER (XEXP (x, 0))
1088 && !REG_POINTER (XEXP (x, 1)))
1089 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
1091 if (tls_referenced_p (x))
1092 return legitimize_tls_address (x);
1093 else if (flag_pic)
1094 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
1096 /* Strip off CONST. */
1097 if (GET_CODE (x) == CONST)
1098 x = XEXP (x, 0);
1100 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1101 That should always be safe. */
1102 if (GET_CODE (x) == PLUS
1103 && GET_CODE (XEXP (x, 0)) == REG
1104 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
1106 rtx reg = force_reg (Pmode, XEXP (x, 1));
1107 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
1110 /* Note we must reject symbols which represent function addresses
1111 since the assembler/linker can't handle arithmetic on plabels. */
1112 if (GET_CODE (x) == PLUS
1113 && GET_CODE (XEXP (x, 1)) == CONST_INT
1114 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1115 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
1116 || GET_CODE (XEXP (x, 0)) == REG))
1118 rtx int_part, ptr_reg;
1119 int newoffset;
1120 int offset = INTVAL (XEXP (x, 1));
1121 int mask;
1123 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
1124 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
1126 /* Choose which way to round the offset. Round up if we
1127 are >= halfway to the next boundary. */
1128 if ((offset & mask) >= ((mask + 1) / 2))
1129 newoffset = (offset & ~ mask) + mask + 1;
1130 else
1131 newoffset = (offset & ~ mask);
1133 /* If the newoffset will not fit in 14 bits (ldo), then
1134 handling this would take 4 or 5 instructions (2 to load
1135 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
1136 add the new offset and the SYMBOL_REF.) Combine can
1137 not handle 4->2 or 5->2 combinations, so do not create
1138 them. */
1139 if (! VAL_14_BITS_P (newoffset)
1140 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
1142 rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
1143 rtx tmp_reg
1144 = force_reg (Pmode,
1145 gen_rtx_HIGH (Pmode, const_part));
1146 ptr_reg
1147 = force_reg (Pmode,
1148 gen_rtx_LO_SUM (Pmode,
1149 tmp_reg, const_part));
1151 else
1153 if (! VAL_14_BITS_P (newoffset))
1154 int_part = force_reg (Pmode, GEN_INT (newoffset));
1155 else
1156 int_part = GEN_INT (newoffset);
1158 ptr_reg = force_reg (Pmode,
1159 gen_rtx_PLUS (Pmode,
1160 force_reg (Pmode, XEXP (x, 0)),
1161 int_part));
1163 return plus_constant (Pmode, ptr_reg, offset - newoffset);
1166 /* Handle (plus (mult (a) (mem_shadd_constant)) (b)). */
1168 if (GET_CODE (x) == PLUS
1169 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1170 && (OBJECT_P (XEXP (x, 1))
1171 || GET_CODE (XEXP (x, 1)) == SUBREG)
1172 && GET_CODE (XEXP (x, 1)) != CONST)
1174 /* If we were given a MULT, we must fix the constant
1175 as we're going to create the ASHIFT form. */
1176 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1177 if (GET_CODE (XEXP (x, 0)) == MULT)
1178 shift_val = exact_log2 (shift_val);
1180 rtx reg1, reg2;
1181 reg1 = XEXP (x, 1);
1182 if (GET_CODE (reg1) != REG)
1183 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1185 reg2 = XEXP (XEXP (x, 0), 0);
1186 if (GET_CODE (reg2) != REG)
1187 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1189 return force_reg (Pmode,
1190 gen_rtx_PLUS (Pmode,
1191 gen_rtx_ASHIFT (Pmode, reg2,
1192 GEN_INT (shift_val)),
1193 reg1));
1196 /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).
1198 Only do so for floating point modes since this is more speculative
1199 and we lose if it's an integer store. */
1200 if (GET_CODE (x) == PLUS
1201 && GET_CODE (XEXP (x, 0)) == PLUS
1202 && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
1203 && (mode == SFmode || mode == DFmode))
1205 int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
1207 /* If we were given a MULT, we must fix the constant
1208 as we're going to create the ASHIFT form. */
1209 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
1210 shift_val = exact_log2 (shift_val);
1212 /* Try and figure out what to use as a base register. */
1213 rtx reg1, reg2, base, idx;
1215 reg1 = XEXP (XEXP (x, 0), 1);
1216 reg2 = XEXP (x, 1);
1217 base = NULL_RTX;
1218 idx = NULL_RTX;
1220 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1221 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
1222 it's a base register below. */
1223 if (GET_CODE (reg1) != REG)
1224 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1226 if (GET_CODE (reg2) != REG)
1227 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1229 /* Figure out what the base and index are. */
1231 if (GET_CODE (reg1) == REG
1232 && REG_POINTER (reg1))
1234 base = reg1;
1235 idx = gen_rtx_PLUS (Pmode,
1236 gen_rtx_ASHIFT (Pmode,
1237 XEXP (XEXP (XEXP (x, 0), 0), 0),
1238 GEN_INT (shift_val)),
1239 XEXP (x, 1));
1241 else if (GET_CODE (reg2) == REG
1242 && REG_POINTER (reg2))
1244 base = reg2;
1245 idx = XEXP (x, 0);
1248 if (base == 0)
1249 return orig;
1251 /* If the index adds a large constant, try to scale the
1252 constant so that it can be loaded with only one insn. */
1253 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1254 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1255 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1256 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1258 /* Divide the CONST_INT by the scale factor, then add it to A. */
1259 int val = INTVAL (XEXP (idx, 1));
1260 val /= (1 << shift_val);
1262 reg1 = XEXP (XEXP (idx, 0), 0);
1263 if (GET_CODE (reg1) != REG)
1264 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1266 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1268 /* We can now generate a simple scaled indexed address. */
1269 return
1270 force_reg
1271 (Pmode, gen_rtx_PLUS (Pmode,
1272 gen_rtx_ASHIFT (Pmode, reg1,
1273 GEN_INT (shift_val)),
1274 base));
1277 /* If B + C is still a valid base register, then add them. */
1278 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1279 && INTVAL (XEXP (idx, 1)) <= 4096
1280 && INTVAL (XEXP (idx, 1)) >= -4096)
1282 rtx reg1, reg2;
1284 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1286 reg2 = XEXP (XEXP (idx, 0), 0);
1287 if (GET_CODE (reg2) != CONST_INT)
1288 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1290 return force_reg (Pmode,
1291 gen_rtx_PLUS (Pmode,
1292 gen_rtx_ASHIFT (Pmode, reg2,
1293 GEN_INT (shift_val)),
1294 reg1));
1297 /* Get the index into a register, then add the base + index and
1298 return a register holding the result. */
1300 /* First get A into a register. */
1301 reg1 = XEXP (XEXP (idx, 0), 0);
1302 if (GET_CODE (reg1) != REG)
1303 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1305 /* And get B into a register. */
1306 reg2 = XEXP (idx, 1);
1307 if (GET_CODE (reg2) != REG)
1308 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1310 reg1 = force_reg (Pmode,
1311 gen_rtx_PLUS (Pmode,
1312 gen_rtx_ASHIFT (Pmode, reg1,
1313 GEN_INT (shift_val)),
1314 reg2));
1316 /* Add the result to our base register and return. */
1317 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1321 /* Uh-oh. We might have an address for x[n-100000]. This needs
1322 special handling to avoid creating an indexed memory address
1323 with x-100000 as the base.
1325 If the constant part is small enough, then it's still safe because
1326 there is a guard page at the beginning and end of the data segment.
1328 Scaled references are common enough that we want to try and rearrange the
1329 terms so that we can use indexing for these addresses too. Only
1330 do the optimization for floatint point modes. */
1332 if (GET_CODE (x) == PLUS
1333 && pa_symbolic_expression_p (XEXP (x, 1)))
1335 /* Ugly. We modify things here so that the address offset specified
1336 by the index expression is computed first, then added to x to form
1337 the entire address. */
1339 rtx regx1, regx2, regy1, regy2, y;
1341 /* Strip off any CONST. */
1342 y = XEXP (x, 1);
1343 if (GET_CODE (y) == CONST)
1344 y = XEXP (y, 0);
1346 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1348 /* See if this looks like
1349 (plus (mult (reg) (mem_shadd_const))
1350 (const (plus (symbol_ref) (const_int))))
1352 Where const_int is small. In that case the const
1353 expression is a valid pointer for indexing.
1355 If const_int is big, but can be divided evenly by shadd_const
1356 and added to (reg). This allows more scaled indexed addresses. */
1357 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1358 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1359 && GET_CODE (XEXP (y, 1)) == CONST_INT
1360 && INTVAL (XEXP (y, 1)) >= -4096
1361 && INTVAL (XEXP (y, 1)) <= 4095)
1363 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1365 /* If we were given a MULT, we must fix the constant
1366 as we're going to create the ASHIFT form. */
1367 if (GET_CODE (XEXP (x, 0)) == MULT)
1368 shift_val = exact_log2 (shift_val);
1370 rtx reg1, reg2;
1372 reg1 = XEXP (x, 1);
1373 if (GET_CODE (reg1) != REG)
1374 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1376 reg2 = XEXP (XEXP (x, 0), 0);
1377 if (GET_CODE (reg2) != REG)
1378 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1380 return
1381 force_reg (Pmode,
1382 gen_rtx_PLUS (Pmode,
1383 gen_rtx_ASHIFT (Pmode,
1384 reg2,
1385 GEN_INT (shift_val)),
1386 reg1));
1388 else if ((mode == DFmode || mode == SFmode)
1389 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1390 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1391 && GET_CODE (XEXP (y, 1)) == CONST_INT
1392 && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
1394 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1396 /* If we were given a MULT, we must fix the constant
1397 as we're going to create the ASHIFT form. */
1398 if (GET_CODE (XEXP (x, 0)) == MULT)
1399 shift_val = exact_log2 (shift_val);
1401 regx1
1402 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1403 / INTVAL (XEXP (XEXP (x, 0), 1))));
1404 regx2 = XEXP (XEXP (x, 0), 0);
1405 if (GET_CODE (regx2) != REG)
1406 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1407 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1408 regx2, regx1));
1409 return
1410 force_reg (Pmode,
1411 gen_rtx_PLUS (Pmode,
1412 gen_rtx_ASHIFT (Pmode, regx2,
1413 GEN_INT (shift_val)),
1414 force_reg (Pmode, XEXP (y, 0))));
1416 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1417 && INTVAL (XEXP (y, 1)) >= -4096
1418 && INTVAL (XEXP (y, 1)) <= 4095)
1420 /* This is safe because of the guard page at the
1421 beginning and end of the data space. Just
1422 return the original address. */
1423 return orig;
1425 else
1427 /* Doesn't look like one we can optimize. */
1428 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1429 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1430 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1431 regx1 = force_reg (Pmode,
1432 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1433 regx1, regy2));
1434 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1439 return orig;
1442 /* Implement the TARGET_REGISTER_MOVE_COST hook.
1444 Compute extra cost of moving data between one register class
1445 and another.
1447 Make moves from SAR so expensive they should never happen. We used to
1448 have 0xffff here, but that generates overflow in rare cases.
1450 Copies involving a FP register and a non-FP register are relatively
1451 expensive because they must go through memory.
1453 Other copies are reasonably cheap. */
1455 static int
1456 hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
1457 reg_class_t from, reg_class_t to)
1459 if (from == SHIFT_REGS)
1460 return 0x100;
1461 else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
1462 return 18;
1463 else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
1464 || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
1465 return 16;
1466 else
1467 return 2;
1470 /* For the HPPA, REG and REG+CONST is cost 0
1471 and addresses involving symbolic constants are cost 2.
1473 PIC addresses are very expensive.
1475 It is no coincidence that this has the same structure
1476 as pa_legitimate_address_p. */
1478 static int
1479 hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
1480 addr_space_t as ATTRIBUTE_UNUSED,
1481 bool speed ATTRIBUTE_UNUSED)
1483 switch (GET_CODE (X))
1485 case REG:
1486 case PLUS:
1487 case LO_SUM:
1488 return 1;
1489 case HIGH:
1490 return 2;
1491 default:
1492 return 4;
1496 /* Compute a (partial) cost for rtx X. Return true if the complete
1497 cost has been computed, and false if subexpressions should be
1498 scanned. In either case, *TOTAL contains the cost result. */
1500 static bool
1501 hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
1502 int opno ATTRIBUTE_UNUSED,
1503 int *total, bool speed ATTRIBUTE_UNUSED)
1505 int factor;
1506 int code = GET_CODE (x);
1508 switch (code)
1510 case CONST_INT:
1511 if (INTVAL (x) == 0)
1512 *total = 0;
1513 else if (INT_14_BITS (x))
1514 *total = 1;
1515 else
1516 *total = 2;
1517 return true;
1519 case HIGH:
1520 *total = 2;
1521 return true;
1523 case CONST:
1524 case LABEL_REF:
1525 case SYMBOL_REF:
1526 *total = 4;
1527 return true;
1529 case CONST_DOUBLE:
1530 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1531 && outer_code != SET)
1532 *total = 0;
1533 else
1534 *total = 8;
1535 return true;
1537 case MULT:
1538 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1540 *total = COSTS_N_INSNS (3);
1541 return true;
1544 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1545 factor = GET_MODE_SIZE (mode) / 4;
1546 if (factor == 0)
1547 factor = 1;
1549 if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1550 *total = factor * factor * COSTS_N_INSNS (8);
1551 else
1552 *total = factor * factor * COSTS_N_INSNS (20);
1553 return true;
1555 case DIV:
1556 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1558 *total = COSTS_N_INSNS (14);
1559 return true;
1561 /* FALLTHRU */
1563 case UDIV:
1564 case MOD:
1565 case UMOD:
1566 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1567 factor = GET_MODE_SIZE (mode) / 4;
1568 if (factor == 0)
1569 factor = 1;
1571 *total = factor * factor * COSTS_N_INSNS (60);
1572 return true;
1574 case PLUS: /* this includes shNadd insns */
1575 case MINUS:
1576 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1578 *total = COSTS_N_INSNS (3);
1579 return true;
1582 /* A size N times larger than UNITS_PER_WORD needs N times as
1583 many insns, taking N times as long. */
1584 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
1585 if (factor == 0)
1586 factor = 1;
1587 *total = factor * COSTS_N_INSNS (1);
1588 return true;
1590 case ASHIFT:
1591 case ASHIFTRT:
1592 case LSHIFTRT:
1593 *total = COSTS_N_INSNS (1);
1594 return true;
1596 default:
1597 return false;
1601 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1602 new rtx with the correct mode. */
1603 static inline rtx
1604 force_mode (machine_mode mode, rtx orig)
1606 if (mode == GET_MODE (orig))
1607 return orig;
1609 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1611 return gen_rtx_REG (mode, REGNO (orig));
1614 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1616 static bool
1617 pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1619 return tls_referenced_p (x);
1622 /* Emit insns to move operands[1] into operands[0].
1624 Return 1 if we have written out everything that needs to be done to
1625 do the move. Otherwise, return 0 and the caller will emit the move
1626 normally.
1628 Note SCRATCH_REG may not be in the proper mode depending on how it
1629 will be used. This routine is responsible for creating a new copy
1630 of SCRATCH_REG in the proper mode. */
1633 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1635 register rtx operand0 = operands[0];
1636 register rtx operand1 = operands[1];
1637 register rtx tem;
1639 /* We can only handle indexed addresses in the destination operand
1640 of floating point stores. Thus, we need to break out indexed
1641 addresses from the destination operand. */
1642 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1644 gcc_assert (can_create_pseudo_p ());
1646 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1647 operand0 = replace_equiv_address (operand0, tem);
1650 /* On targets with non-equivalent space registers, break out unscaled
1651 indexed addresses from the source operand before the final CSE.
1652 We have to do this because the REG_POINTER flag is not correctly
1653 carried through various optimization passes and CSE may substitute
1654 a pseudo without the pointer set for one with the pointer set. As
1655 a result, we loose various opportunities to create insns with
1656 unscaled indexed addresses. */
1657 if (!TARGET_NO_SPACE_REGS
1658 && !cse_not_expected
1659 && GET_CODE (operand1) == MEM
1660 && GET_CODE (XEXP (operand1, 0)) == PLUS
1661 && REG_P (XEXP (XEXP (operand1, 0), 0))
1662 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1663 operand1
1664 = replace_equiv_address (operand1,
1665 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1667 if (scratch_reg
1668 && reload_in_progress && GET_CODE (operand0) == REG
1669 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1670 operand0 = reg_equiv_mem (REGNO (operand0));
1671 else if (scratch_reg
1672 && reload_in_progress && GET_CODE (operand0) == SUBREG
1673 && GET_CODE (SUBREG_REG (operand0)) == REG
1674 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1676 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1677 the code which tracks sets/uses for delete_output_reload. */
1678 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1679 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1680 SUBREG_BYTE (operand0));
1681 operand0 = alter_subreg (&temp, true);
1684 if (scratch_reg
1685 && reload_in_progress && GET_CODE (operand1) == REG
1686 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1687 operand1 = reg_equiv_mem (REGNO (operand1));
1688 else if (scratch_reg
1689 && reload_in_progress && GET_CODE (operand1) == SUBREG
1690 && GET_CODE (SUBREG_REG (operand1)) == REG
1691 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1693 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1694 the code which tracks sets/uses for delete_output_reload. */
1695 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1696 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1697 SUBREG_BYTE (operand1));
1698 operand1 = alter_subreg (&temp, true);
1701 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1702 && ((tem = find_replacement (&XEXP (operand0, 0)))
1703 != XEXP (operand0, 0)))
1704 operand0 = replace_equiv_address (operand0, tem);
1706 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1707 && ((tem = find_replacement (&XEXP (operand1, 0)))
1708 != XEXP (operand1, 0)))
1709 operand1 = replace_equiv_address (operand1, tem);
1711 /* Handle secondary reloads for loads/stores of FP registers from
1712 REG+D addresses where D does not fit in 5 or 14 bits, including
1713 (subreg (mem (addr))) cases, and reloads for other unsupported
1714 memory operands. */
1715 if (scratch_reg
1716 && FP_REG_P (operand0)
1717 && (MEM_P (operand1)
1718 || (GET_CODE (operand1) == SUBREG
1719 && MEM_P (XEXP (operand1, 0)))))
1721 rtx op1 = operand1;
1723 if (GET_CODE (op1) == SUBREG)
1724 op1 = XEXP (op1, 0);
1726 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1728 if (!(TARGET_PA_20
1729 && !TARGET_ELF32
1730 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1731 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1733 /* SCRATCH_REG will hold an address and maybe the actual data.
1734 We want it in WORD_MODE regardless of what mode it was
1735 originally given to us. */
1736 scratch_reg = force_mode (word_mode, scratch_reg);
1738 /* D might not fit in 14 bits either; for such cases load D
1739 into scratch reg. */
1740 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1742 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1743 emit_move_insn (scratch_reg,
1744 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1745 Pmode,
1746 XEXP (XEXP (op1, 0), 0),
1747 scratch_reg));
1749 else
1750 emit_move_insn (scratch_reg, XEXP (op1, 0));
1751 emit_insn (gen_rtx_SET (operand0,
1752 replace_equiv_address (op1, scratch_reg)));
1753 return 1;
1756 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1757 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1758 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1760 /* Load memory address into SCRATCH_REG. */
1761 scratch_reg = force_mode (word_mode, scratch_reg);
1762 emit_move_insn (scratch_reg, XEXP (op1, 0));
1763 emit_insn (gen_rtx_SET (operand0,
1764 replace_equiv_address (op1, scratch_reg)));
1765 return 1;
1768 else if (scratch_reg
1769 && FP_REG_P (operand1)
1770 && (MEM_P (operand0)
1771 || (GET_CODE (operand0) == SUBREG
1772 && MEM_P (XEXP (operand0, 0)))))
1774 rtx op0 = operand0;
1776 if (GET_CODE (op0) == SUBREG)
1777 op0 = XEXP (op0, 0);
1779 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1781 if (!(TARGET_PA_20
1782 && !TARGET_ELF32
1783 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1784 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1786 /* SCRATCH_REG will hold an address and maybe the actual data.
1787 We want it in WORD_MODE regardless of what mode it was
1788 originally given to us. */
1789 scratch_reg = force_mode (word_mode, scratch_reg);
1791 /* D might not fit in 14 bits either; for such cases load D
1792 into scratch reg. */
1793 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1795 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1796 emit_move_insn (scratch_reg,
1797 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1798 Pmode,
1799 XEXP (XEXP (op0, 0), 0),
1800 scratch_reg));
1802 else
1803 emit_move_insn (scratch_reg, XEXP (op0, 0));
1804 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1805 operand1));
1806 return 1;
1809 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1810 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1811 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1813 /* Load memory address into SCRATCH_REG. */
1814 scratch_reg = force_mode (word_mode, scratch_reg);
1815 emit_move_insn (scratch_reg, XEXP (op0, 0));
1816 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1817 operand1));
1818 return 1;
1821 /* Handle secondary reloads for loads of FP registers from constant
1822 expressions by forcing the constant into memory. For the most part,
1823 this is only necessary for SImode and DImode.
1825 Use scratch_reg to hold the address of the memory location. */
1826 else if (scratch_reg
1827 && CONSTANT_P (operand1)
1828 && FP_REG_P (operand0))
1830 rtx const_mem, xoperands[2];
1832 if (operand1 == CONST0_RTX (mode))
1834 emit_insn (gen_rtx_SET (operand0, operand1));
1835 return 1;
1838 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1839 it in WORD_MODE regardless of what mode it was originally given
1840 to us. */
1841 scratch_reg = force_mode (word_mode, scratch_reg);
1843 /* Force the constant into memory and put the address of the
1844 memory location into scratch_reg. */
1845 const_mem = force_const_mem (mode, operand1);
1846 xoperands[0] = scratch_reg;
1847 xoperands[1] = XEXP (const_mem, 0);
1848 pa_emit_move_sequence (xoperands, Pmode, 0);
1850 /* Now load the destination register. */
1851 emit_insn (gen_rtx_SET (operand0,
1852 replace_equiv_address (const_mem, scratch_reg)));
1853 return 1;
1855 /* Handle secondary reloads for SAR. These occur when trying to load
1856 the SAR from memory or a constant. */
1857 else if (scratch_reg
1858 && GET_CODE (operand0) == REG
1859 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1860 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1861 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1863 /* D might not fit in 14 bits either; for such cases load D into
1864 scratch reg. */
1865 if (GET_CODE (operand1) == MEM
1866 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1868 /* We are reloading the address into the scratch register, so we
1869 want to make sure the scratch register is a full register. */
1870 scratch_reg = force_mode (word_mode, scratch_reg);
1872 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1873 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1874 0)),
1875 Pmode,
1876 XEXP (XEXP (operand1, 0),
1878 scratch_reg));
1880 /* Now we are going to load the scratch register from memory,
1881 we want to load it in the same width as the original MEM,
1882 which must be the same as the width of the ultimate destination,
1883 OPERAND0. */
1884 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1886 emit_move_insn (scratch_reg,
1887 replace_equiv_address (operand1, scratch_reg));
1889 else
1891 /* We want to load the scratch register using the same mode as
1892 the ultimate destination. */
1893 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1895 emit_move_insn (scratch_reg, operand1);
1898 /* And emit the insn to set the ultimate destination. We know that
1899 the scratch register has the same mode as the destination at this
1900 point. */
1901 emit_move_insn (operand0, scratch_reg);
1902 return 1;
1905 /* Handle the most common case: storing into a register. */
1906 if (register_operand (operand0, mode))
1908 /* Legitimize TLS symbol references. This happens for references
1909 that aren't a legitimate constant. */
1910 if (PA_SYMBOL_REF_TLS_P (operand1))
1911 operand1 = legitimize_tls_address (operand1);
1913 if (register_operand (operand1, mode)
1914 || (GET_CODE (operand1) == CONST_INT
1915 && pa_cint_ok_for_move (UINTVAL (operand1)))
1916 || (operand1 == CONST0_RTX (mode))
1917 || (GET_CODE (operand1) == HIGH
1918 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1919 /* Only `general_operands' can come here, so MEM is ok. */
1920 || GET_CODE (operand1) == MEM)
1922 /* Various sets are created during RTL generation which don't
1923 have the REG_POINTER flag correctly set. After the CSE pass,
1924 instruction recognition can fail if we don't consistently
1925 set this flag when performing register copies. This should
1926 also improve the opportunities for creating insns that use
1927 unscaled indexing. */
1928 if (REG_P (operand0) && REG_P (operand1))
1930 if (REG_POINTER (operand1)
1931 && !REG_POINTER (operand0)
1932 && !HARD_REGISTER_P (operand0))
1933 copy_reg_pointer (operand0, operand1);
1936 /* When MEMs are broken out, the REG_POINTER flag doesn't
1937 get set. In some cases, we can set the REG_POINTER flag
1938 from the declaration for the MEM. */
1939 if (REG_P (operand0)
1940 && GET_CODE (operand1) == MEM
1941 && !REG_POINTER (operand0))
1943 tree decl = MEM_EXPR (operand1);
1945 /* Set the register pointer flag and register alignment
1946 if the declaration for this memory reference is a
1947 pointer type. */
1948 if (decl)
1950 tree type;
1952 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1953 tree operand 1. */
1954 if (TREE_CODE (decl) == COMPONENT_REF)
1955 decl = TREE_OPERAND (decl, 1);
1957 type = TREE_TYPE (decl);
1958 type = strip_array_types (type);
1960 if (POINTER_TYPE_P (type))
1961 mark_reg_pointer (operand0, BITS_PER_UNIT);
1965 emit_insn (gen_rtx_SET (operand0, operand1));
1966 return 1;
1969 else if (GET_CODE (operand0) == MEM)
1971 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1972 && !(reload_in_progress || reload_completed))
1974 rtx temp = gen_reg_rtx (DFmode);
1976 emit_insn (gen_rtx_SET (temp, operand1));
1977 emit_insn (gen_rtx_SET (operand0, temp));
1978 return 1;
1980 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1982 /* Run this case quickly. */
1983 emit_insn (gen_rtx_SET (operand0, operand1));
1984 return 1;
1986 if (! (reload_in_progress || reload_completed))
1988 operands[0] = validize_mem (operand0);
1989 operands[1] = operand1 = force_reg (mode, operand1);
1993 /* Simplify the source if we need to.
1994 Note we do have to handle function labels here, even though we do
1995 not consider them legitimate constants. Loop optimizations can
1996 call the emit_move_xxx with one as a source. */
1997 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1998 || (GET_CODE (operand1) == HIGH
1999 && symbolic_operand (XEXP (operand1, 0), mode))
2000 || function_label_operand (operand1, VOIDmode)
2001 || tls_referenced_p (operand1))
2003 int ishighonly = 0;
2005 if (GET_CODE (operand1) == HIGH)
2007 ishighonly = 1;
2008 operand1 = XEXP (operand1, 0);
2010 if (symbolic_operand (operand1, mode))
2012 /* Argh. The assembler and linker can't handle arithmetic
2013 involving plabels.
2015 So we force the plabel into memory, load operand0 from
2016 the memory location, then add in the constant part. */
2017 if ((GET_CODE (operand1) == CONST
2018 && GET_CODE (XEXP (operand1, 0)) == PLUS
2019 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
2020 VOIDmode))
2021 || function_label_operand (operand1, VOIDmode))
2023 rtx temp, const_part;
2025 /* Figure out what (if any) scratch register to use. */
2026 if (reload_in_progress || reload_completed)
2028 scratch_reg = scratch_reg ? scratch_reg : operand0;
2029 /* SCRATCH_REG will hold an address and maybe the actual
2030 data. We want it in WORD_MODE regardless of what mode it
2031 was originally given to us. */
2032 scratch_reg = force_mode (word_mode, scratch_reg);
2034 else if (flag_pic)
2035 scratch_reg = gen_reg_rtx (Pmode);
2037 if (GET_CODE (operand1) == CONST)
2039 /* Save away the constant part of the expression. */
2040 const_part = XEXP (XEXP (operand1, 0), 1);
2041 gcc_assert (GET_CODE (const_part) == CONST_INT);
2043 /* Force the function label into memory. */
2044 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2046 else
2048 /* No constant part. */
2049 const_part = NULL_RTX;
2051 /* Force the function label into memory. */
2052 temp = force_const_mem (mode, operand1);
2056 /* Get the address of the memory location. PIC-ify it if
2057 necessary. */
2058 temp = XEXP (temp, 0);
2059 if (flag_pic)
2060 temp = legitimize_pic_address (temp, mode, scratch_reg);
2062 /* Put the address of the memory location into our destination
2063 register. */
2064 operands[1] = temp;
2065 pa_emit_move_sequence (operands, mode, scratch_reg);
2067 /* Now load from the memory location into our destination
2068 register. */
2069 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2070 pa_emit_move_sequence (operands, mode, scratch_reg);
2072 /* And add back in the constant part. */
2073 if (const_part != NULL_RTX)
2074 expand_inc (operand0, const_part);
2076 return 1;
2079 if (flag_pic)
2081 rtx_insn *insn;
2082 rtx temp;
2084 if (reload_in_progress || reload_completed)
2086 temp = scratch_reg ? scratch_reg : operand0;
2087 /* TEMP will hold an address and maybe the actual
2088 data. We want it in WORD_MODE regardless of what mode it
2089 was originally given to us. */
2090 temp = force_mode (word_mode, temp);
2092 else
2093 temp = gen_reg_rtx (Pmode);
2095 /* Force (const (plus (symbol) (const_int))) to memory
2096 if the const_int will not fit in 14 bits. Although
2097 this requires a relocation, the instruction sequence
2098 needed to load the value is shorter. */
2099 if (GET_CODE (operand1) == CONST
2100 && GET_CODE (XEXP (operand1, 0)) == PLUS
2101 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2102 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2104 rtx x, m = force_const_mem (mode, operand1);
2106 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2107 x = replace_equiv_address (m, x);
2108 insn = emit_move_insn (operand0, x);
2110 else
2112 operands[1] = legitimize_pic_address (operand1, mode, temp);
2113 if (REG_P (operand0) && REG_P (operands[1]))
2114 copy_reg_pointer (operand0, operands[1]);
2115 insn = emit_move_insn (operand0, operands[1]);
2118 /* Put a REG_EQUAL note on this insn. */
2119 set_unique_reg_note (insn, REG_EQUAL, operand1);
2121 /* On the HPPA, references to data space are supposed to use dp,
2122 register 27, but showing it in the RTL inhibits various cse
2123 and loop optimizations. */
2124 else
2126 rtx temp, set;
2128 if (reload_in_progress || reload_completed)
2130 temp = scratch_reg ? scratch_reg : operand0;
2131 /* TEMP will hold an address and maybe the actual
2132 data. We want it in WORD_MODE regardless of what mode it
2133 was originally given to us. */
2134 temp = force_mode (word_mode, temp);
2136 else
2137 temp = gen_reg_rtx (mode);
2139 /* Loading a SYMBOL_REF into a register makes that register
2140 safe to be used as the base in an indexed address.
2142 Don't mark hard registers though. That loses. */
2143 if (GET_CODE (operand0) == REG
2144 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2145 mark_reg_pointer (operand0, BITS_PER_UNIT);
2146 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2147 mark_reg_pointer (temp, BITS_PER_UNIT);
2149 if (ishighonly)
2150 set = gen_rtx_SET (operand0, temp);
2151 else
2152 set = gen_rtx_SET (operand0,
2153 gen_rtx_LO_SUM (mode, temp, operand1));
2155 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2156 emit_insn (set);
2159 return 1;
2161 else if (tls_referenced_p (operand1))
2163 rtx tmp = operand1;
2164 rtx addend = NULL;
2166 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2168 addend = XEXP (XEXP (tmp, 0), 1);
2169 tmp = XEXP (XEXP (tmp, 0), 0);
2172 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2173 tmp = legitimize_tls_address (tmp);
2174 if (addend)
2176 tmp = gen_rtx_PLUS (mode, tmp, addend);
2177 tmp = force_operand (tmp, operands[0]);
2179 operands[1] = tmp;
2181 else if (GET_CODE (operand1) != CONST_INT
2182 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2184 rtx temp;
2185 rtx_insn *insn;
2186 rtx op1 = operand1;
2187 HOST_WIDE_INT value = 0;
2188 HOST_WIDE_INT insv = 0;
2189 int insert = 0;
2191 if (GET_CODE (operand1) == CONST_INT)
2192 value = INTVAL (operand1);
2194 if (TARGET_64BIT
2195 && GET_CODE (operand1) == CONST_INT
2196 && HOST_BITS_PER_WIDE_INT > 32
2197 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2199 HOST_WIDE_INT nval;
2201 /* Extract the low order 32 bits of the value and sign extend.
2202 If the new value is the same as the original value, we can
2203 can use the original value as-is. If the new value is
2204 different, we use it and insert the most-significant 32-bits
2205 of the original value into the final result. */
2206 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2207 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2208 if (value != nval)
2210 #if HOST_BITS_PER_WIDE_INT > 32
2211 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2212 #endif
2213 insert = 1;
2214 value = nval;
2215 operand1 = GEN_INT (nval);
2219 if (reload_in_progress || reload_completed)
2220 temp = scratch_reg ? scratch_reg : operand0;
2221 else
2222 temp = gen_reg_rtx (mode);
2224 /* We don't directly split DImode constants on 32-bit targets
2225 because PLUS uses an 11-bit immediate and the insn sequence
2226 generated is not as efficient as the one using HIGH/LO_SUM. */
2227 if (GET_CODE (operand1) == CONST_INT
2228 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2229 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2230 && !insert)
2232 /* Directly break constant into high and low parts. This
2233 provides better optimization opportunities because various
2234 passes recognize constants split with PLUS but not LO_SUM.
2235 We use a 14-bit signed low part except when the addition
2236 of 0x4000 to the high part might change the sign of the
2237 high part. */
2238 HOST_WIDE_INT low = value & 0x3fff;
2239 HOST_WIDE_INT high = value & ~ 0x3fff;
2241 if (low >= 0x2000)
2243 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2244 high += 0x2000;
2245 else
2246 high += 0x4000;
2249 low = value - high;
2251 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2252 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2254 else
2256 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2257 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2260 insn = emit_move_insn (operands[0], operands[1]);
2262 /* Now insert the most significant 32 bits of the value
2263 into the register. When we don't have a second register
2264 available, it could take up to nine instructions to load
2265 a 64-bit integer constant. Prior to reload, we force
2266 constants that would take more than three instructions
2267 to load to the constant pool. During and after reload,
2268 we have to handle all possible values. */
2269 if (insert)
2271 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2272 register and the value to be inserted is outside the
2273 range that can be loaded with three depdi instructions. */
2274 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2276 operand1 = GEN_INT (insv);
2278 emit_insn (gen_rtx_SET (temp,
2279 gen_rtx_HIGH (mode, operand1)));
2280 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2281 if (mode == DImode)
2282 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2283 const0_rtx, temp));
2284 else
2285 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2286 const0_rtx, temp));
2288 else
2290 int len = 5, pos = 27;
2292 /* Insert the bits using the depdi instruction. */
2293 while (pos >= 0)
2295 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2296 HOST_WIDE_INT sign = v5 < 0;
2298 /* Left extend the insertion. */
2299 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2300 while (pos > 0 && (insv & 1) == sign)
2302 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2303 len += 1;
2304 pos -= 1;
2307 if (mode == DImode)
2308 insn = emit_insn (gen_insvdi (operand0,
2309 GEN_INT (len),
2310 GEN_INT (pos),
2311 GEN_INT (v5)));
2312 else
2313 insn = emit_insn (gen_insvsi (operand0,
2314 GEN_INT (len),
2315 GEN_INT (pos),
2316 GEN_INT (v5)));
2318 len = pos > 0 && pos < 5 ? pos : 5;
2319 pos -= len;
2324 set_unique_reg_note (insn, REG_EQUAL, op1);
2326 return 1;
2329 /* Now have insn-emit do whatever it normally does. */
2330 return 0;
2333 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2334 it will need a link/runtime reloc). */
2337 pa_reloc_needed (tree exp)
2339 int reloc = 0;
2341 switch (TREE_CODE (exp))
2343 case ADDR_EXPR:
2344 return 1;
2346 case POINTER_PLUS_EXPR:
2347 case PLUS_EXPR:
2348 case MINUS_EXPR:
2349 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2350 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2351 break;
2353 CASE_CONVERT:
2354 case NON_LVALUE_EXPR:
2355 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2356 break;
2358 case CONSTRUCTOR:
2360 tree value;
2361 unsigned HOST_WIDE_INT ix;
2363 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2364 if (value)
2365 reloc |= pa_reloc_needed (value);
2367 break;
2369 case ERROR_MARK:
2370 break;
2372 default:
2373 break;
2375 return reloc;
2379 /* Return the best assembler insn template
2380 for moving operands[1] into operands[0] as a fullword. */
2381 const char *
2382 pa_singlemove_string (rtx *operands)
2384 HOST_WIDE_INT intval;
2386 if (GET_CODE (operands[0]) == MEM)
2387 return "stw %r1,%0";
2388 if (GET_CODE (operands[1]) == MEM)
2389 return "ldw %1,%0";
2390 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2392 long i;
2394 gcc_assert (GET_MODE (operands[1]) == SFmode);
2396 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2397 bit pattern. */
2398 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2400 operands[1] = GEN_INT (i);
2401 /* Fall through to CONST_INT case. */
2403 if (GET_CODE (operands[1]) == CONST_INT)
2405 intval = INTVAL (operands[1]);
2407 if (VAL_14_BITS_P (intval))
2408 return "ldi %1,%0";
2409 else if ((intval & 0x7ff) == 0)
2410 return "ldil L'%1,%0";
2411 else if (pa_zdepi_cint_p (intval))
2412 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2413 else
2414 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2416 return "copy %1,%0";
2420 /* Compute position (in OP[1]) and width (in OP[2])
2421 useful for copying IMM to a register using the zdepi
2422 instructions. Store the immediate value to insert in OP[0]. */
2423 static void
2424 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2426 int lsb, len;
2428 /* Find the least significant set bit in IMM. */
2429 for (lsb = 0; lsb < 32; lsb++)
2431 if ((imm & 1) != 0)
2432 break;
2433 imm >>= 1;
2436 /* Choose variants based on *sign* of the 5-bit field. */
2437 if ((imm & 0x10) == 0)
2438 len = (lsb <= 28) ? 4 : 32 - lsb;
2439 else
2441 /* Find the width of the bitstring in IMM. */
2442 for (len = 5; len < 32 - lsb; len++)
2444 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2445 break;
2448 /* Sign extend IMM as a 5-bit value. */
2449 imm = (imm & 0xf) - 0x10;
2452 op[0] = imm;
2453 op[1] = 31 - lsb;
2454 op[2] = len;
2457 /* Compute position (in OP[1]) and width (in OP[2])
2458 useful for copying IMM to a register using the depdi,z
2459 instructions. Store the immediate value to insert in OP[0]. */
2461 static void
2462 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2464 int lsb, len, maxlen;
2466 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2468 /* Find the least significant set bit in IMM. */
2469 for (lsb = 0; lsb < maxlen; lsb++)
2471 if ((imm & 1) != 0)
2472 break;
2473 imm >>= 1;
2476 /* Choose variants based on *sign* of the 5-bit field. */
2477 if ((imm & 0x10) == 0)
2478 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2479 else
2481 /* Find the width of the bitstring in IMM. */
2482 for (len = 5; len < maxlen - lsb; len++)
2484 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2485 break;
2488 /* Extend length if host is narrow and IMM is negative. */
2489 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2490 len += 32;
2492 /* Sign extend IMM as a 5-bit value. */
2493 imm = (imm & 0xf) - 0x10;
2496 op[0] = imm;
2497 op[1] = 63 - lsb;
2498 op[2] = len;
2501 /* Output assembler code to perform a doubleword move insn
2502 with operands OPERANDS. */
2504 const char *
2505 pa_output_move_double (rtx *operands)
2507 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2508 rtx latehalf[2];
2509 rtx addreg0 = 0, addreg1 = 0;
2510 int highonly = 0;
2512 /* First classify both operands. */
2514 if (REG_P (operands[0]))
2515 optype0 = REGOP;
2516 else if (offsettable_memref_p (operands[0]))
2517 optype0 = OFFSOP;
2518 else if (GET_CODE (operands[0]) == MEM)
2519 optype0 = MEMOP;
2520 else
2521 optype0 = RNDOP;
2523 if (REG_P (operands[1]))
2524 optype1 = REGOP;
2525 else if (CONSTANT_P (operands[1]))
2526 optype1 = CNSTOP;
2527 else if (offsettable_memref_p (operands[1]))
2528 optype1 = OFFSOP;
2529 else if (GET_CODE (operands[1]) == MEM)
2530 optype1 = MEMOP;
2531 else
2532 optype1 = RNDOP;
2534 /* Check for the cases that the operand constraints are not
2535 supposed to allow to happen. */
2536 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2538 /* Handle copies between general and floating registers. */
2540 if (optype0 == REGOP && optype1 == REGOP
2541 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2543 if (FP_REG_P (operands[0]))
2545 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2546 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2547 return "{fldds|fldd} -16(%%sp),%0";
2549 else
2551 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2552 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2553 return "{ldws|ldw} -12(%%sp),%R0";
2557 /* Handle auto decrementing and incrementing loads and stores
2558 specifically, since the structure of the function doesn't work
2559 for them without major modification. Do it better when we learn
2560 this port about the general inc/dec addressing of PA.
2561 (This was written by tege. Chide him if it doesn't work.) */
2563 if (optype0 == MEMOP)
2565 /* We have to output the address syntax ourselves, since print_operand
2566 doesn't deal with the addresses we want to use. Fix this later. */
2568 rtx addr = XEXP (operands[0], 0);
2569 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2571 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2573 operands[0] = XEXP (addr, 0);
2574 gcc_assert (GET_CODE (operands[1]) == REG
2575 && GET_CODE (operands[0]) == REG);
2577 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2579 /* No overlap between high target register and address
2580 register. (We do this in a non-obvious way to
2581 save a register file writeback) */
2582 if (GET_CODE (addr) == POST_INC)
2583 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2584 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2586 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2588 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2590 operands[0] = XEXP (addr, 0);
2591 gcc_assert (GET_CODE (operands[1]) == REG
2592 && GET_CODE (operands[0]) == REG);
2594 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2595 /* No overlap between high target register and address
2596 register. (We do this in a non-obvious way to save a
2597 register file writeback) */
2598 if (GET_CODE (addr) == PRE_INC)
2599 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2600 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2603 if (optype1 == MEMOP)
2605 /* We have to output the address syntax ourselves, since print_operand
2606 doesn't deal with the addresses we want to use. Fix this later. */
2608 rtx addr = XEXP (operands[1], 0);
2609 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2611 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2613 operands[1] = XEXP (addr, 0);
2614 gcc_assert (GET_CODE (operands[0]) == REG
2615 && GET_CODE (operands[1]) == REG);
2617 if (!reg_overlap_mentioned_p (high_reg, addr))
2619 /* No overlap between high target register and address
2620 register. (We do this in a non-obvious way to
2621 save a register file writeback) */
2622 if (GET_CODE (addr) == POST_INC)
2623 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2624 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2626 else
2628 /* This is an undefined situation. We should load into the
2629 address register *and* update that register. Probably
2630 we don't need to handle this at all. */
2631 if (GET_CODE (addr) == POST_INC)
2632 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2633 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2636 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2638 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2640 operands[1] = XEXP (addr, 0);
2641 gcc_assert (GET_CODE (operands[0]) == REG
2642 && GET_CODE (operands[1]) == REG);
2644 if (!reg_overlap_mentioned_p (high_reg, addr))
2646 /* No overlap between high target register and address
2647 register. (We do this in a non-obvious way to
2648 save a register file writeback) */
2649 if (GET_CODE (addr) == PRE_INC)
2650 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2651 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2653 else
2655 /* This is an undefined situation. We should load into the
2656 address register *and* update that register. Probably
2657 we don't need to handle this at all. */
2658 if (GET_CODE (addr) == PRE_INC)
2659 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2660 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2663 else if (GET_CODE (addr) == PLUS
2664 && GET_CODE (XEXP (addr, 0)) == MULT)
2666 rtx xoperands[4];
2668 /* Load address into left half of destination register. */
2669 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2670 xoperands[1] = XEXP (addr, 1);
2671 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2672 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2673 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2674 xoperands);
2675 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2677 else if (GET_CODE (addr) == PLUS
2678 && REG_P (XEXP (addr, 0))
2679 && REG_P (XEXP (addr, 1)))
2681 rtx xoperands[3];
2683 /* Load address into left half of destination register. */
2684 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2685 xoperands[1] = XEXP (addr, 0);
2686 xoperands[2] = XEXP (addr, 1);
2687 output_asm_insn ("{addl|add,l} %1,%2,%0",
2688 xoperands);
2689 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2693 /* If an operand is an unoffsettable memory ref, find a register
2694 we can increment temporarily to make it refer to the second word. */
2696 if (optype0 == MEMOP)
2697 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2699 if (optype1 == MEMOP)
2700 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2702 /* Ok, we can do one word at a time.
2703 Normally we do the low-numbered word first.
2705 In either case, set up in LATEHALF the operands to use
2706 for the high-numbered word and in some cases alter the
2707 operands in OPERANDS to be suitable for the low-numbered word. */
2709 if (optype0 == REGOP)
2710 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2711 else if (optype0 == OFFSOP)
2712 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2713 else
2714 latehalf[0] = operands[0];
2716 if (optype1 == REGOP)
2717 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2718 else if (optype1 == OFFSOP)
2719 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2720 else if (optype1 == CNSTOP)
2722 if (GET_CODE (operands[1]) == HIGH)
2724 operands[1] = XEXP (operands[1], 0);
2725 highonly = 1;
2727 split_double (operands[1], &operands[1], &latehalf[1]);
2729 else
2730 latehalf[1] = operands[1];
2732 /* If the first move would clobber the source of the second one,
2733 do them in the other order.
2735 This can happen in two cases:
2737 mem -> register where the first half of the destination register
2738 is the same register used in the memory's address. Reload
2739 can create such insns.
2741 mem in this case will be either register indirect or register
2742 indirect plus a valid offset.
2744 register -> register move where REGNO(dst) == REGNO(src + 1)
2745 someone (Tim/Tege?) claimed this can happen for parameter loads.
2747 Handle mem -> register case first. */
2748 if (optype0 == REGOP
2749 && (optype1 == MEMOP || optype1 == OFFSOP)
2750 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2752 /* Do the late half first. */
2753 if (addreg1)
2754 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2755 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2757 /* Then clobber. */
2758 if (addreg1)
2759 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2760 return pa_singlemove_string (operands);
2763 /* Now handle register -> register case. */
2764 if (optype0 == REGOP && optype1 == REGOP
2765 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2767 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2768 return pa_singlemove_string (operands);
2771 /* Normal case: do the two words, low-numbered first. */
2773 output_asm_insn (pa_singlemove_string (operands), operands);
2775 /* Make any unoffsettable addresses point at high-numbered word. */
2776 if (addreg0)
2777 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2778 if (addreg1)
2779 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2781 /* Do high-numbered word. */
2782 if (highonly)
2783 output_asm_insn ("ldil L'%1,%0", latehalf);
2784 else
2785 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2787 /* Undo the adds we just did. */
2788 if (addreg0)
2789 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2790 if (addreg1)
2791 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2793 return "";
2796 const char *
2797 pa_output_fp_move_double (rtx *operands)
2799 if (FP_REG_P (operands[0]))
2801 if (FP_REG_P (operands[1])
2802 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2803 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2804 else
2805 output_asm_insn ("fldd%F1 %1,%0", operands);
2807 else if (FP_REG_P (operands[1]))
2809 output_asm_insn ("fstd%F0 %1,%0", operands);
2811 else
2813 rtx xoperands[2];
2815 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2817 /* This is a pain. You have to be prepared to deal with an
2818 arbitrary address here including pre/post increment/decrement.
2820 so avoid this in the MD. */
2821 gcc_assert (GET_CODE (operands[0]) == REG);
2823 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2824 xoperands[0] = operands[0];
2825 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2827 return "";
2830 /* Return a REG that occurs in ADDR with coefficient 1.
2831 ADDR can be effectively incremented by incrementing REG. */
2833 static rtx
2834 find_addr_reg (rtx addr)
2836 while (GET_CODE (addr) == PLUS)
2838 if (GET_CODE (XEXP (addr, 0)) == REG)
2839 addr = XEXP (addr, 0);
2840 else if (GET_CODE (XEXP (addr, 1)) == REG)
2841 addr = XEXP (addr, 1);
2842 else if (CONSTANT_P (XEXP (addr, 0)))
2843 addr = XEXP (addr, 1);
2844 else if (CONSTANT_P (XEXP (addr, 1)))
2845 addr = XEXP (addr, 0);
2846 else
2847 gcc_unreachable ();
2849 gcc_assert (GET_CODE (addr) == REG);
2850 return addr;
2853 /* Emit code to perform a block move.
2855 OPERANDS[0] is the destination pointer as a REG, clobbered.
2856 OPERANDS[1] is the source pointer as a REG, clobbered.
2857 OPERANDS[2] is a register for temporary storage.
2858 OPERANDS[3] is a register for temporary storage.
2859 OPERANDS[4] is the size as a CONST_INT
2860 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2861 OPERANDS[6] is another temporary register. */
2863 const char *
2864 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2866 int align = INTVAL (operands[5]);
2867 unsigned long n_bytes = INTVAL (operands[4]);
2869 /* We can't move more than a word at a time because the PA
2870 has no longer integer move insns. (Could use fp mem ops?) */
2871 if (align > (TARGET_64BIT ? 8 : 4))
2872 align = (TARGET_64BIT ? 8 : 4);
2874 /* Note that we know each loop below will execute at least twice
2875 (else we would have open-coded the copy). */
2876 switch (align)
2878 case 8:
2879 /* Pre-adjust the loop counter. */
2880 operands[4] = GEN_INT (n_bytes - 16);
2881 output_asm_insn ("ldi %4,%2", operands);
2883 /* Copying loop. */
2884 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2885 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2886 output_asm_insn ("std,ma %3,8(%0)", operands);
2887 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2888 output_asm_insn ("std,ma %6,8(%0)", operands);
2890 /* Handle the residual. There could be up to 7 bytes of
2891 residual to copy! */
2892 if (n_bytes % 16 != 0)
2894 operands[4] = GEN_INT (n_bytes % 8);
2895 if (n_bytes % 16 >= 8)
2896 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2897 if (n_bytes % 8 != 0)
2898 output_asm_insn ("ldd 0(%1),%6", operands);
2899 if (n_bytes % 16 >= 8)
2900 output_asm_insn ("std,ma %3,8(%0)", operands);
2901 if (n_bytes % 8 != 0)
2902 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2904 return "";
2906 case 4:
2907 /* Pre-adjust the loop counter. */
2908 operands[4] = GEN_INT (n_bytes - 8);
2909 output_asm_insn ("ldi %4,%2", operands);
2911 /* Copying loop. */
2912 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2913 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2914 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2915 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2916 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2918 /* Handle the residual. There could be up to 7 bytes of
2919 residual to copy! */
2920 if (n_bytes % 8 != 0)
2922 operands[4] = GEN_INT (n_bytes % 4);
2923 if (n_bytes % 8 >= 4)
2924 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2925 if (n_bytes % 4 != 0)
2926 output_asm_insn ("ldw 0(%1),%6", operands);
2927 if (n_bytes % 8 >= 4)
2928 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2929 if (n_bytes % 4 != 0)
2930 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2932 return "";
2934 case 2:
2935 /* Pre-adjust the loop counter. */
2936 operands[4] = GEN_INT (n_bytes - 4);
2937 output_asm_insn ("ldi %4,%2", operands);
2939 /* Copying loop. */
2940 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2941 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2942 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2943 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2944 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2946 /* Handle the residual. */
2947 if (n_bytes % 4 != 0)
2949 if (n_bytes % 4 >= 2)
2950 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2951 if (n_bytes % 2 != 0)
2952 output_asm_insn ("ldb 0(%1),%6", operands);
2953 if (n_bytes % 4 >= 2)
2954 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2955 if (n_bytes % 2 != 0)
2956 output_asm_insn ("stb %6,0(%0)", operands);
2958 return "";
2960 case 1:
2961 /* Pre-adjust the loop counter. */
2962 operands[4] = GEN_INT (n_bytes - 2);
2963 output_asm_insn ("ldi %4,%2", operands);
2965 /* Copying loop. */
2966 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2967 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2968 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2969 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2970 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2972 /* Handle the residual. */
2973 if (n_bytes % 2 != 0)
2975 output_asm_insn ("ldb 0(%1),%3", operands);
2976 output_asm_insn ("stb %3,0(%0)", operands);
2978 return "";
2980 default:
2981 gcc_unreachable ();
2985 /* Count the number of insns necessary to handle this block move.
2987 Basic structure is the same as emit_block_move, except that we
2988 count insns rather than emit them. */
2990 static int
2991 compute_movmem_length (rtx_insn *insn)
2993 rtx pat = PATTERN (insn);
2994 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2995 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2996 unsigned int n_insns = 0;
2998 /* We can't move more than four bytes at a time because the PA
2999 has no longer integer move insns. (Could use fp mem ops?) */
3000 if (align > (TARGET_64BIT ? 8 : 4))
3001 align = (TARGET_64BIT ? 8 : 4);
3003 /* The basic copying loop. */
3004 n_insns = 6;
3006 /* Residuals. */
3007 if (n_bytes % (2 * align) != 0)
3009 if ((n_bytes % (2 * align)) >= align)
3010 n_insns += 2;
3012 if ((n_bytes % align) != 0)
3013 n_insns += 2;
3016 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3017 return n_insns * 4;
3020 /* Emit code to perform a block clear.
3022 OPERANDS[0] is the destination pointer as a REG, clobbered.
3023 OPERANDS[1] is a register for temporary storage.
3024 OPERANDS[2] is the size as a CONST_INT
3025 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3027 const char *
3028 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3030 int align = INTVAL (operands[3]);
3031 unsigned long n_bytes = INTVAL (operands[2]);
3033 /* We can't clear more than a word at a time because the PA
3034 has no longer integer move insns. */
3035 if (align > (TARGET_64BIT ? 8 : 4))
3036 align = (TARGET_64BIT ? 8 : 4);
3038 /* Note that we know each loop below will execute at least twice
3039 (else we would have open-coded the copy). */
3040 switch (align)
3042 case 8:
3043 /* Pre-adjust the loop counter. */
3044 operands[2] = GEN_INT (n_bytes - 16);
3045 output_asm_insn ("ldi %2,%1", operands);
3047 /* Loop. */
3048 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3049 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3050 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3052 /* Handle the residual. There could be up to 7 bytes of
3053 residual to copy! */
3054 if (n_bytes % 16 != 0)
3056 operands[2] = GEN_INT (n_bytes % 8);
3057 if (n_bytes % 16 >= 8)
3058 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3059 if (n_bytes % 8 != 0)
3060 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3062 return "";
3064 case 4:
3065 /* Pre-adjust the loop counter. */
3066 operands[2] = GEN_INT (n_bytes - 8);
3067 output_asm_insn ("ldi %2,%1", operands);
3069 /* Loop. */
3070 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3071 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3072 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3074 /* Handle the residual. There could be up to 7 bytes of
3075 residual to copy! */
3076 if (n_bytes % 8 != 0)
3078 operands[2] = GEN_INT (n_bytes % 4);
3079 if (n_bytes % 8 >= 4)
3080 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3081 if (n_bytes % 4 != 0)
3082 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3084 return "";
3086 case 2:
3087 /* Pre-adjust the loop counter. */
3088 operands[2] = GEN_INT (n_bytes - 4);
3089 output_asm_insn ("ldi %2,%1", operands);
3091 /* Loop. */
3092 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3093 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3094 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3096 /* Handle the residual. */
3097 if (n_bytes % 4 != 0)
3099 if (n_bytes % 4 >= 2)
3100 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3101 if (n_bytes % 2 != 0)
3102 output_asm_insn ("stb %%r0,0(%0)", operands);
3104 return "";
3106 case 1:
3107 /* Pre-adjust the loop counter. */
3108 operands[2] = GEN_INT (n_bytes - 2);
3109 output_asm_insn ("ldi %2,%1", operands);
3111 /* Loop. */
3112 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3113 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3114 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3116 /* Handle the residual. */
3117 if (n_bytes % 2 != 0)
3118 output_asm_insn ("stb %%r0,0(%0)", operands);
3120 return "";
3122 default:
3123 gcc_unreachable ();
3127 /* Count the number of insns necessary to handle this block move.
3129 Basic structure is the same as emit_block_move, except that we
3130 count insns rather than emit them. */
3132 static int
3133 compute_clrmem_length (rtx_insn *insn)
3135 rtx pat = PATTERN (insn);
3136 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3137 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3138 unsigned int n_insns = 0;
3140 /* We can't clear more than a word at a time because the PA
3141 has no longer integer move insns. */
3142 if (align > (TARGET_64BIT ? 8 : 4))
3143 align = (TARGET_64BIT ? 8 : 4);
3145 /* The basic loop. */
3146 n_insns = 4;
3148 /* Residuals. */
3149 if (n_bytes % (2 * align) != 0)
3151 if ((n_bytes % (2 * align)) >= align)
3152 n_insns++;
3154 if ((n_bytes % align) != 0)
3155 n_insns++;
3158 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3159 return n_insns * 4;
3163 const char *
3164 pa_output_and (rtx *operands)
3166 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3168 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3169 int ls0, ls1, ms0, p, len;
3171 for (ls0 = 0; ls0 < 32; ls0++)
3172 if ((mask & (1 << ls0)) == 0)
3173 break;
3175 for (ls1 = ls0; ls1 < 32; ls1++)
3176 if ((mask & (1 << ls1)) != 0)
3177 break;
3179 for (ms0 = ls1; ms0 < 32; ms0++)
3180 if ((mask & (1 << ms0)) == 0)
3181 break;
3183 gcc_assert (ms0 == 32);
3185 if (ls1 == 32)
3187 len = ls0;
3189 gcc_assert (len);
3191 operands[2] = GEN_INT (len);
3192 return "{extru|extrw,u} %1,31,%2,%0";
3194 else
3196 /* We could use this `depi' for the case above as well, but `depi'
3197 requires one more register file access than an `extru'. */
3199 p = 31 - ls0;
3200 len = ls1 - ls0;
3202 operands[2] = GEN_INT (p);
3203 operands[3] = GEN_INT (len);
3204 return "{depi|depwi} 0,%2,%3,%0";
3207 else
3208 return "and %1,%2,%0";
3211 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3212 storing the result in operands[0]. */
3213 const char *
3214 pa_output_64bit_and (rtx *operands)
3216 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3218 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3219 int ls0, ls1, ms0, p, len;
3221 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3222 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3223 break;
3225 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3226 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3227 break;
3229 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3230 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3231 break;
3233 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3235 if (ls1 == HOST_BITS_PER_WIDE_INT)
3237 len = ls0;
3239 gcc_assert (len);
3241 operands[2] = GEN_INT (len);
3242 return "extrd,u %1,63,%2,%0";
3244 else
3246 /* We could use this `depi' for the case above as well, but `depi'
3247 requires one more register file access than an `extru'. */
3249 p = 63 - ls0;
3250 len = ls1 - ls0;
3252 operands[2] = GEN_INT (p);
3253 operands[3] = GEN_INT (len);
3254 return "depdi 0,%2,%3,%0";
3257 else
3258 return "and %1,%2,%0";
3261 const char *
3262 pa_output_ior (rtx *operands)
3264 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3265 int bs0, bs1, p, len;
3267 if (INTVAL (operands[2]) == 0)
3268 return "copy %1,%0";
3270 for (bs0 = 0; bs0 < 32; bs0++)
3271 if ((mask & (1 << bs0)) != 0)
3272 break;
3274 for (bs1 = bs0; bs1 < 32; bs1++)
3275 if ((mask & (1 << bs1)) == 0)
3276 break;
3278 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3280 p = 31 - bs0;
3281 len = bs1 - bs0;
3283 operands[2] = GEN_INT (p);
3284 operands[3] = GEN_INT (len);
3285 return "{depi|depwi} -1,%2,%3,%0";
3288 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3289 storing the result in operands[0]. */
3290 const char *
3291 pa_output_64bit_ior (rtx *operands)
3293 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3294 int bs0, bs1, p, len;
3296 if (INTVAL (operands[2]) == 0)
3297 return "copy %1,%0";
3299 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3300 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3301 break;
3303 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3304 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3305 break;
3307 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3308 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3310 p = 63 - bs0;
3311 len = bs1 - bs0;
3313 operands[2] = GEN_INT (p);
3314 operands[3] = GEN_INT (len);
3315 return "depdi -1,%2,%3,%0";
3318 /* Target hook for assembling integer objects. This code handles
3319 aligned SI and DI integers specially since function references
3320 must be preceded by P%. */
3322 static bool
3323 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3325 bool result;
3326 tree decl = NULL;
3328 /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to call
3329 call assemble_external and set the SYMBOL_REF_DECL to NULL before
3330 calling output_addr_const. Otherwise, it may call assemble_external
3331 in the midst of outputing the assembler code for the SYMBOL_REF.
3332 We restore the SYMBOL_REF_DECL after the output is done. */
3333 if (GET_CODE (x) == SYMBOL_REF)
3335 decl = SYMBOL_REF_DECL (x);
3336 if (decl)
3338 assemble_external (decl);
3339 SET_SYMBOL_REF_DECL (x, NULL);
3343 if (size == UNITS_PER_WORD
3344 && aligned_p
3345 && function_label_operand (x, VOIDmode))
3347 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3349 /* We don't want an OPD when generating fast indirect calls. */
3350 if (!TARGET_FAST_INDIRECT_CALLS)
3351 fputs ("P%", asm_out_file);
3353 output_addr_const (asm_out_file, x);
3354 fputc ('\n', asm_out_file);
3355 result = true;
3357 else
3358 result = default_assemble_integer (x, size, aligned_p);
3360 if (decl)
3361 SET_SYMBOL_REF_DECL (x, decl);
3363 return result;
3366 /* Output an ascii string. */
3367 void
3368 pa_output_ascii (FILE *file, const char *p, int size)
3370 int i;
3371 int chars_output;
3372 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3374 /* The HP assembler can only take strings of 256 characters at one
3375 time. This is a limitation on input line length, *not* the
3376 length of the string. Sigh. Even worse, it seems that the
3377 restriction is in number of input characters (see \xnn &
3378 \whatever). So we have to do this very carefully. */
3380 fputs ("\t.STRING \"", file);
3382 chars_output = 0;
3383 for (i = 0; i < size; i += 4)
3385 int co = 0;
3386 int io = 0;
3387 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3389 register unsigned int c = (unsigned char) p[i + io];
3391 if (c == '\"' || c == '\\')
3392 partial_output[co++] = '\\';
3393 if (c >= ' ' && c < 0177)
3394 partial_output[co++] = c;
3395 else
3397 unsigned int hexd;
3398 partial_output[co++] = '\\';
3399 partial_output[co++] = 'x';
3400 hexd = c / 16 - 0 + '0';
3401 if (hexd > '9')
3402 hexd -= '9' - 'a' + 1;
3403 partial_output[co++] = hexd;
3404 hexd = c % 16 - 0 + '0';
3405 if (hexd > '9')
3406 hexd -= '9' - 'a' + 1;
3407 partial_output[co++] = hexd;
3410 if (chars_output + co > 243)
3412 fputs ("\"\n\t.STRING \"", file);
3413 chars_output = 0;
3415 fwrite (partial_output, 1, (size_t) co, file);
3416 chars_output += co;
3417 co = 0;
3419 fputs ("\"\n", file);
3422 /* Try to rewrite floating point comparisons & branches to avoid
3423 useless add,tr insns.
3425 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3426 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3427 first attempt to remove useless add,tr insns. It is zero
3428 for the second pass as reorg sometimes leaves bogus REG_DEAD
3429 notes lying around.
3431 When CHECK_NOTES is zero we can only eliminate add,tr insns
3432 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3433 instructions. */
3434 static void
3435 remove_useless_addtr_insns (int check_notes)
3437 rtx_insn *insn;
3438 static int pass = 0;
3440 /* This is fairly cheap, so always run it when optimizing. */
3441 if (optimize > 0)
3443 int fcmp_count = 0;
3444 int fbranch_count = 0;
3446 /* Walk all the insns in this function looking for fcmp & fbranch
3447 instructions. Keep track of how many of each we find. */
3448 for (insn = get_insns (); insn; insn = next_insn (insn))
3450 rtx tmp;
3452 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3453 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3454 continue;
3456 tmp = PATTERN (insn);
3458 /* It must be a set. */
3459 if (GET_CODE (tmp) != SET)
3460 continue;
3462 /* If the destination is CCFP, then we've found an fcmp insn. */
3463 tmp = SET_DEST (tmp);
3464 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3466 fcmp_count++;
3467 continue;
3470 tmp = PATTERN (insn);
3471 /* If this is an fbranch instruction, bump the fbranch counter. */
3472 if (GET_CODE (tmp) == SET
3473 && SET_DEST (tmp) == pc_rtx
3474 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3475 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3476 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3477 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3479 fbranch_count++;
3480 continue;
3485 /* Find all floating point compare + branch insns. If possible,
3486 reverse the comparison & the branch to avoid add,tr insns. */
3487 for (insn = get_insns (); insn; insn = next_insn (insn))
3489 rtx tmp;
3490 rtx_insn *next;
3492 /* Ignore anything that isn't an INSN. */
3493 if (! NONJUMP_INSN_P (insn))
3494 continue;
3496 tmp = PATTERN (insn);
3498 /* It must be a set. */
3499 if (GET_CODE (tmp) != SET)
3500 continue;
3502 /* The destination must be CCFP, which is register zero. */
3503 tmp = SET_DEST (tmp);
3504 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3505 continue;
3507 /* INSN should be a set of CCFP.
3509 See if the result of this insn is used in a reversed FP
3510 conditional branch. If so, reverse our condition and
3511 the branch. Doing so avoids useless add,tr insns. */
3512 next = next_insn (insn);
3513 while (next)
3515 /* Jumps, calls and labels stop our search. */
3516 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3517 break;
3519 /* As does another fcmp insn. */
3520 if (NONJUMP_INSN_P (next)
3521 && GET_CODE (PATTERN (next)) == SET
3522 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3523 && REGNO (SET_DEST (PATTERN (next))) == 0)
3524 break;
3526 next = next_insn (next);
3529 /* Is NEXT_INSN a branch? */
3530 if (next && JUMP_P (next))
3532 rtx pattern = PATTERN (next);
3534 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3535 and CCFP dies, then reverse our conditional and the branch
3536 to avoid the add,tr. */
3537 if (GET_CODE (pattern) == SET
3538 && SET_DEST (pattern) == pc_rtx
3539 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3540 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3541 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3542 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3543 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3544 && (fcmp_count == fbranch_count
3545 || (check_notes
3546 && find_regno_note (next, REG_DEAD, 0))))
3548 /* Reverse the branch. */
3549 tmp = XEXP (SET_SRC (pattern), 1);
3550 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3551 XEXP (SET_SRC (pattern), 2) = tmp;
3552 INSN_CODE (next) = -1;
3554 /* Reverse our condition. */
3555 tmp = PATTERN (insn);
3556 PUT_CODE (XEXP (tmp, 1),
3557 (reverse_condition_maybe_unordered
3558 (GET_CODE (XEXP (tmp, 1)))));
3564 pass = !pass;
3568 /* You may have trouble believing this, but this is the 32 bit HP-PA
3569 stack layout. Wow.
3571 Offset Contents
3573 Variable arguments (optional; any number may be allocated)
3575 SP-(4*(N+9)) arg word N
3577 SP-56 arg word 5
3578 SP-52 arg word 4
3580 Fixed arguments (must be allocated; may remain unused)
3582 SP-48 arg word 3
3583 SP-44 arg word 2
3584 SP-40 arg word 1
3585 SP-36 arg word 0
3587 Frame Marker
3589 SP-32 External Data Pointer (DP)
3590 SP-28 External sr4
3591 SP-24 External/stub RP (RP')
3592 SP-20 Current RP
3593 SP-16 Static Link
3594 SP-12 Clean up
3595 SP-8 Calling Stub RP (RP'')
3596 SP-4 Previous SP
3598 Top of Frame
3600 SP-0 Stack Pointer (points to next available address)
3604 /* This function saves registers as follows. Registers marked with ' are
3605 this function's registers (as opposed to the previous function's).
3606 If a frame_pointer isn't needed, r4 is saved as a general register;
3607 the space for the frame pointer is still allocated, though, to keep
3608 things simple.
3611 Top of Frame
3613 SP (FP') Previous FP
3614 SP + 4 Alignment filler (sigh)
3615 SP + 8 Space for locals reserved here.
3619 SP + n All call saved register used.
3623 SP + o All call saved fp registers used.
3627 SP + p (SP') points to next available address.
3631 /* Global variables set by output_function_prologue(). */
3632 /* Size of frame. Need to know this to emit return insns from
3633 leaf procedures. */
3634 static HOST_WIDE_INT actual_fsize, local_fsize;
3635 static int save_fregs;
3637 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3638 Handle case where DISP > 8k by using the add_high_const patterns.
3640 Note in DISP > 8k case, we will leave the high part of the address
3641 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/
3643 static void
3644 store_reg (int reg, HOST_WIDE_INT disp, int base)
3646 rtx dest, src, basereg;
3647 rtx_insn *insn;
3649 src = gen_rtx_REG (word_mode, reg);
3650 basereg = gen_rtx_REG (Pmode, base);
3651 if (VAL_14_BITS_P (disp))
3653 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3654 insn = emit_move_insn (dest, src);
3656 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3658 rtx delta = GEN_INT (disp);
3659 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3661 emit_move_insn (tmpreg, delta);
3662 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3663 if (DO_FRAME_NOTES)
3665 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3666 gen_rtx_SET (tmpreg,
3667 gen_rtx_PLUS (Pmode, basereg, delta)));
3668 RTX_FRAME_RELATED_P (insn) = 1;
3670 dest = gen_rtx_MEM (word_mode, tmpreg);
3671 insn = emit_move_insn (dest, src);
3673 else
3675 rtx delta = GEN_INT (disp);
3676 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3677 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3679 emit_move_insn (tmpreg, high);
3680 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3681 insn = emit_move_insn (dest, src);
3682 if (DO_FRAME_NOTES)
3683 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3684 gen_rtx_SET (gen_rtx_MEM (word_mode,
3685 gen_rtx_PLUS (word_mode,
3686 basereg,
3687 delta)),
3688 src));
3691 if (DO_FRAME_NOTES)
3692 RTX_FRAME_RELATED_P (insn) = 1;
3695 /* Emit RTL to store REG at the memory location specified by BASE and then
3696 add MOD to BASE. MOD must be <= 8k. */
3698 static void
3699 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3701 rtx basereg, srcreg, delta;
3702 rtx_insn *insn;
3704 gcc_assert (VAL_14_BITS_P (mod));
3706 basereg = gen_rtx_REG (Pmode, base);
3707 srcreg = gen_rtx_REG (word_mode, reg);
3708 delta = GEN_INT (mod);
3710 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3711 if (DO_FRAME_NOTES)
3713 RTX_FRAME_RELATED_P (insn) = 1;
3715 /* RTX_FRAME_RELATED_P must be set on each frame related set
3716 in a parallel with more than one element. */
3717 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3718 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3722 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3723 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3724 whether to add a frame note or not.
3726 In the DISP > 8k case, we leave the high part of the address in %r1.
3727 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3729 static void
3730 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3732 rtx_insn *insn;
3734 if (VAL_14_BITS_P (disp))
3736 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3737 plus_constant (Pmode,
3738 gen_rtx_REG (Pmode, base), disp));
3740 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3742 rtx basereg = gen_rtx_REG (Pmode, base);
3743 rtx delta = GEN_INT (disp);
3744 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3746 emit_move_insn (tmpreg, delta);
3747 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3748 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3749 if (DO_FRAME_NOTES)
3750 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3751 gen_rtx_SET (tmpreg,
3752 gen_rtx_PLUS (Pmode, basereg, delta)));
3754 else
3756 rtx basereg = gen_rtx_REG (Pmode, base);
3757 rtx delta = GEN_INT (disp);
3758 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3760 emit_move_insn (tmpreg,
3761 gen_rtx_PLUS (Pmode, basereg,
3762 gen_rtx_HIGH (Pmode, delta)));
3763 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3764 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3767 if (DO_FRAME_NOTES && note)
3768 RTX_FRAME_RELATED_P (insn) = 1;
3771 HOST_WIDE_INT
3772 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3774 int freg_saved = 0;
3775 int i, j;
3777 /* The code in pa_expand_prologue and pa_expand_epilogue must
3778 be consistent with the rounding and size calculation done here.
3779 Change them at the same time. */
3781 /* We do our own stack alignment. First, round the size of the
3782 stack locals up to a word boundary. */
3783 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3785 /* Space for previous frame pointer + filler. If any frame is
3786 allocated, we need to add in the TARGET_STARTING_FRAME_OFFSET. We
3787 waste some space here for the sake of HP compatibility. The
3788 first slot is only used when the frame pointer is needed. */
3789 if (size || frame_pointer_needed)
3790 size += pa_starting_frame_offset ();
3792 /* If the current function calls __builtin_eh_return, then we need
3793 to allocate stack space for registers that will hold data for
3794 the exception handler. */
3795 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3797 unsigned int i;
3799 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3800 continue;
3801 size += i * UNITS_PER_WORD;
3804 /* Account for space used by the callee general register saves. */
3805 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3806 if (df_regs_ever_live_p (i))
3807 size += UNITS_PER_WORD;
3809 /* Account for space used by the callee floating point register saves. */
3810 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3811 if (df_regs_ever_live_p (i)
3812 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3814 freg_saved = 1;
3816 /* We always save both halves of the FP register, so always
3817 increment the frame size by 8 bytes. */
3818 size += 8;
3821 /* If any of the floating registers are saved, account for the
3822 alignment needed for the floating point register save block. */
3823 if (freg_saved)
3825 size = (size + 7) & ~7;
3826 if (fregs_live)
3827 *fregs_live = 1;
3830 /* The various ABIs include space for the outgoing parameters in the
3831 size of the current function's stack frame. We don't need to align
3832 for the outgoing arguments as their alignment is set by the final
3833 rounding for the frame as a whole. */
3834 size += crtl->outgoing_args_size;
3836 /* Allocate space for the fixed frame marker. This space must be
3837 allocated for any function that makes calls or allocates
3838 stack space. */
3839 if (!crtl->is_leaf || size)
3840 size += TARGET_64BIT ? 48 : 32;
3842 /* Finally, round to the preferred stack boundary. */
3843 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3844 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3847 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3848 of memory. If any fpu reg is used in the function, we allocate
3849 such a block here, at the bottom of the frame, just in case it's needed.
3851 If this function is a leaf procedure, then we may choose not
3852 to do a "save" insn. The decision about whether or not
3853 to do this is made in regclass.c. */
3855 static void
3856 pa_output_function_prologue (FILE *file)
3858 /* The function's label and associated .PROC must never be
3859 separated and must be output *after* any profiling declarations
3860 to avoid changing spaces/subspaces within a procedure. */
3861 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3862 fputs ("\t.PROC\n", file);
3864 /* pa_expand_prologue does the dirty work now. We just need
3865 to output the assembler directives which denote the start
3866 of a function. */
3867 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3868 if (crtl->is_leaf)
3869 fputs (",NO_CALLS", file);
3870 else
3871 fputs (",CALLS", file);
3872 if (rp_saved)
3873 fputs (",SAVE_RP", file);
3875 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3876 at the beginning of the frame and that it is used as the frame
3877 pointer for the frame. We do this because our current frame
3878 layout doesn't conform to that specified in the HP runtime
3879 documentation and we need a way to indicate to programs such as
3880 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3881 isn't used by HP compilers but is supported by the assembler.
3882 However, SAVE_SP is supposed to indicate that the previous stack
3883 pointer has been saved in the frame marker. */
3884 if (frame_pointer_needed)
3885 fputs (",SAVE_SP", file);
3887 /* Pass on information about the number of callee register saves
3888 performed in the prologue.
3890 The compiler is supposed to pass the highest register number
3891 saved, the assembler then has to adjust that number before
3892 entering it into the unwind descriptor (to account for any
3893 caller saved registers with lower register numbers than the
3894 first callee saved register). */
3895 if (gr_saved)
3896 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3898 if (fr_saved)
3899 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3901 fputs ("\n\t.ENTRY\n", file);
3903 remove_useless_addtr_insns (0);
3906 void
3907 pa_expand_prologue (void)
3909 int merge_sp_adjust_with_store = 0;
3910 HOST_WIDE_INT size = get_frame_size ();
3911 HOST_WIDE_INT offset;
3912 int i;
3913 rtx tmpreg;
3914 rtx_insn *insn;
3916 gr_saved = 0;
3917 fr_saved = 0;
3918 save_fregs = 0;
3920 /* Compute total size for frame pointer, filler, locals and rounding to
3921 the next word boundary. Similar code appears in pa_compute_frame_size
3922 and must be changed in tandem with this code. */
3923 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3924 if (local_fsize || frame_pointer_needed)
3925 local_fsize += pa_starting_frame_offset ();
3927 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3928 if (flag_stack_usage_info)
3929 current_function_static_stack_size = actual_fsize;
3931 /* Compute a few things we will use often. */
3932 tmpreg = gen_rtx_REG (word_mode, 1);
3934 /* Save RP first. The calling conventions manual states RP will
3935 always be stored into the caller's frame at sp - 20 or sp - 16
3936 depending on which ABI is in use. */
3937 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3939 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3940 rp_saved = true;
3942 else
3943 rp_saved = false;
3945 /* Allocate the local frame and set up the frame pointer if needed. */
3946 if (actual_fsize != 0)
3948 if (frame_pointer_needed)
3950 /* Copy the old frame pointer temporarily into %r1. Set up the
3951 new stack pointer, then store away the saved old frame pointer
3952 into the stack at sp and at the same time update the stack
3953 pointer by actual_fsize bytes. Two versions, first
3954 handles small (<8k) frames. The second handles large (>=8k)
3955 frames. */
3956 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3957 if (DO_FRAME_NOTES)
3958 RTX_FRAME_RELATED_P (insn) = 1;
3960 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3961 if (DO_FRAME_NOTES)
3962 RTX_FRAME_RELATED_P (insn) = 1;
3964 if (VAL_14_BITS_P (actual_fsize))
3965 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3966 else
3968 /* It is incorrect to store the saved frame pointer at *sp,
3969 then increment sp (writes beyond the current stack boundary).
3971 So instead use stwm to store at *sp and post-increment the
3972 stack pointer as an atomic operation. Then increment sp to
3973 finish allocating the new frame. */
3974 HOST_WIDE_INT adjust1 = 8192 - 64;
3975 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3977 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3978 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3979 adjust2, 1);
3982 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3983 we need to store the previous stack pointer (frame pointer)
3984 into the frame marker on targets that use the HP unwind
3985 library. This allows the HP unwind library to be used to
3986 unwind GCC frames. However, we are not fully compatible
3987 with the HP library because our frame layout differs from
3988 that specified in the HP runtime specification.
3990 We don't want a frame note on this instruction as the frame
3991 marker moves during dynamic stack allocation.
3993 This instruction also serves as a blockage to prevent
3994 register spills from being scheduled before the stack
3995 pointer is raised. This is necessary as we store
3996 registers using the frame pointer as a base register,
3997 and the frame pointer is set before sp is raised. */
3998 if (TARGET_HPUX_UNWIND_LIBRARY)
4000 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
4001 GEN_INT (TARGET_64BIT ? -8 : -4));
4003 emit_move_insn (gen_rtx_MEM (word_mode, addr),
4004 hard_frame_pointer_rtx);
4006 else
4007 emit_insn (gen_blockage ());
4009 /* no frame pointer needed. */
4010 else
4012 /* In some cases we can perform the first callee register save
4013 and allocating the stack frame at the same time. If so, just
4014 make a note of it and defer allocating the frame until saving
4015 the callee registers. */
4016 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
4017 merge_sp_adjust_with_store = 1;
4018 /* Can not optimize. Adjust the stack frame by actual_fsize
4019 bytes. */
4020 else
4021 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4022 actual_fsize, 1);
4026 /* Normal register save.
4028 Do not save the frame pointer in the frame_pointer_needed case. It
4029 was done earlier. */
4030 if (frame_pointer_needed)
4032 offset = local_fsize;
4034 /* Saving the EH return data registers in the frame is the simplest
4035 way to get the frame unwind information emitted. We put them
4036 just before the general registers. */
4037 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4039 unsigned int i, regno;
4041 for (i = 0; ; ++i)
4043 regno = EH_RETURN_DATA_REGNO (i);
4044 if (regno == INVALID_REGNUM)
4045 break;
4047 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4048 offset += UNITS_PER_WORD;
4052 for (i = 18; i >= 4; i--)
4053 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4055 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4056 offset += UNITS_PER_WORD;
4057 gr_saved++;
4059 /* Account for %r3 which is saved in a special place. */
4060 gr_saved++;
4062 /* No frame pointer needed. */
4063 else
4065 offset = local_fsize - actual_fsize;
4067 /* Saving the EH return data registers in the frame is the simplest
4068 way to get the frame unwind information emitted. */
4069 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4071 unsigned int i, regno;
4073 for (i = 0; ; ++i)
4075 regno = EH_RETURN_DATA_REGNO (i);
4076 if (regno == INVALID_REGNUM)
4077 break;
4079 /* If merge_sp_adjust_with_store is nonzero, then we can
4080 optimize the first save. */
4081 if (merge_sp_adjust_with_store)
4083 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4084 merge_sp_adjust_with_store = 0;
4086 else
4087 store_reg (regno, offset, STACK_POINTER_REGNUM);
4088 offset += UNITS_PER_WORD;
4092 for (i = 18; i >= 3; i--)
4093 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4095 /* If merge_sp_adjust_with_store is nonzero, then we can
4096 optimize the first GR save. */
4097 if (merge_sp_adjust_with_store)
4099 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4100 merge_sp_adjust_with_store = 0;
4102 else
4103 store_reg (i, offset, STACK_POINTER_REGNUM);
4104 offset += UNITS_PER_WORD;
4105 gr_saved++;
4108 /* If we wanted to merge the SP adjustment with a GR save, but we never
4109 did any GR saves, then just emit the adjustment here. */
4110 if (merge_sp_adjust_with_store)
4111 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4112 actual_fsize, 1);
4115 /* The hppa calling conventions say that %r19, the pic offset
4116 register, is saved at sp - 32 (in this function's frame)
4117 when generating PIC code. FIXME: What is the correct thing
4118 to do for functions which make no calls and allocate no
4119 frame? Do we need to allocate a frame, or can we just omit
4120 the save? For now we'll just omit the save.
4122 We don't want a note on this insn as the frame marker can
4123 move if there is a dynamic stack allocation. */
4124 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4126 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4128 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4132 /* Align pointer properly (doubleword boundary). */
4133 offset = (offset + 7) & ~7;
4135 /* Floating point register store. */
4136 if (save_fregs)
4138 rtx base;
4140 /* First get the frame or stack pointer to the start of the FP register
4141 save area. */
4142 if (frame_pointer_needed)
4144 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4145 base = hard_frame_pointer_rtx;
4147 else
4149 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4150 base = stack_pointer_rtx;
4153 /* Now actually save the FP registers. */
4154 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4156 if (df_regs_ever_live_p (i)
4157 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4159 rtx addr, reg;
4160 rtx_insn *insn;
4161 addr = gen_rtx_MEM (DFmode,
4162 gen_rtx_POST_INC (word_mode, tmpreg));
4163 reg = gen_rtx_REG (DFmode, i);
4164 insn = emit_move_insn (addr, reg);
4165 if (DO_FRAME_NOTES)
4167 RTX_FRAME_RELATED_P (insn) = 1;
4168 if (TARGET_64BIT)
4170 rtx mem = gen_rtx_MEM (DFmode,
4171 plus_constant (Pmode, base,
4172 offset));
4173 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4174 gen_rtx_SET (mem, reg));
4176 else
4178 rtx meml = gen_rtx_MEM (SFmode,
4179 plus_constant (Pmode, base,
4180 offset));
4181 rtx memr = gen_rtx_MEM (SFmode,
4182 plus_constant (Pmode, base,
4183 offset + 4));
4184 rtx regl = gen_rtx_REG (SFmode, i);
4185 rtx regr = gen_rtx_REG (SFmode, i + 1);
4186 rtx setl = gen_rtx_SET (meml, regl);
4187 rtx setr = gen_rtx_SET (memr, regr);
4188 rtvec vec;
4190 RTX_FRAME_RELATED_P (setl) = 1;
4191 RTX_FRAME_RELATED_P (setr) = 1;
4192 vec = gen_rtvec (2, setl, setr);
4193 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4194 gen_rtx_SEQUENCE (VOIDmode, vec));
4197 offset += GET_MODE_SIZE (DFmode);
4198 fr_saved++;
4204 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4205 Handle case where DISP > 8k by using the add_high_const patterns. */
4207 static void
4208 load_reg (int reg, HOST_WIDE_INT disp, int base)
4210 rtx dest = gen_rtx_REG (word_mode, reg);
4211 rtx basereg = gen_rtx_REG (Pmode, base);
4212 rtx src;
4214 if (VAL_14_BITS_P (disp))
4215 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4216 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4218 rtx delta = GEN_INT (disp);
4219 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4221 emit_move_insn (tmpreg, delta);
4222 if (TARGET_DISABLE_INDEXING)
4224 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4225 src = gen_rtx_MEM (word_mode, tmpreg);
4227 else
4228 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4230 else
4232 rtx delta = GEN_INT (disp);
4233 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4234 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4236 emit_move_insn (tmpreg, high);
4237 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4240 emit_move_insn (dest, src);
4243 /* Update the total code bytes output to the text section. */
4245 static void
4246 update_total_code_bytes (unsigned int nbytes)
4248 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4249 && !IN_NAMED_SECTION_P (cfun->decl))
4251 unsigned int old_total = total_code_bytes;
4253 total_code_bytes += nbytes;
4255 /* Be prepared to handle overflows. */
4256 if (old_total > total_code_bytes)
4257 total_code_bytes = UINT_MAX;
4261 /* This function generates the assembly code for function exit.
4262 Args are as for output_function_prologue ().
4264 The function epilogue should not depend on the current stack
4265 pointer! It should use the frame pointer only. This is mandatory
4266 because of alloca; we also take advantage of it to omit stack
4267 adjustments before returning. */
4269 static void
4270 pa_output_function_epilogue (FILE *file)
4272 rtx_insn *insn = get_last_insn ();
4273 bool extra_nop;
4275 /* pa_expand_epilogue does the dirty work now. We just need
4276 to output the assembler directives which denote the end
4277 of a function.
4279 To make debuggers happy, emit a nop if the epilogue was completely
4280 eliminated due to a volatile call as the last insn in the
4281 current function. That way the return address (in %r2) will
4282 always point to a valid instruction in the current function. */
4284 /* Get the last real insn. */
4285 if (NOTE_P (insn))
4286 insn = prev_real_insn (insn);
4288 /* If it is a sequence, then look inside. */
4289 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4290 insn = as_a <rtx_sequence *> (PATTERN (insn))-> insn (0);
4292 /* If insn is a CALL_INSN, then it must be a call to a volatile
4293 function (otherwise there would be epilogue insns). */
4294 if (insn && CALL_P (insn))
4296 fputs ("\tnop\n", file);
4297 extra_nop = true;
4299 else
4300 extra_nop = false;
4302 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4304 if (TARGET_SOM && TARGET_GAS)
4306 /* We are done with this subspace except possibly for some additional
4307 debug information. Forget that we are in this subspace to ensure
4308 that the next function is output in its own subspace. */
4309 in_section = NULL;
4310 cfun->machine->in_nsubspa = 2;
4313 /* Thunks do their own insn accounting. */
4314 if (cfun->is_thunk)
4315 return;
4317 if (INSN_ADDRESSES_SET_P ())
4319 last_address = extra_nop ? 4 : 0;
4320 insn = get_last_nonnote_insn ();
4321 if (insn)
4323 last_address += INSN_ADDRESSES (INSN_UID (insn));
4324 if (INSN_P (insn))
4325 last_address += insn_default_length (insn);
4327 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4328 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4330 else
4331 last_address = UINT_MAX;
4333 /* Finally, update the total number of code bytes output so far. */
4334 update_total_code_bytes (last_address);
4337 void
4338 pa_expand_epilogue (void)
4340 rtx tmpreg;
4341 HOST_WIDE_INT offset;
4342 HOST_WIDE_INT ret_off = 0;
4343 int i;
4344 int merge_sp_adjust_with_load = 0;
4346 /* We will use this often. */
4347 tmpreg = gen_rtx_REG (word_mode, 1);
4349 /* Try to restore RP early to avoid load/use interlocks when
4350 RP gets used in the return (bv) instruction. This appears to still
4351 be necessary even when we schedule the prologue and epilogue. */
4352 if (rp_saved)
4354 ret_off = TARGET_64BIT ? -16 : -20;
4355 if (frame_pointer_needed)
4357 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4358 ret_off = 0;
4360 else
4362 /* No frame pointer, and stack is smaller than 8k. */
4363 if (VAL_14_BITS_P (ret_off - actual_fsize))
4365 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4366 ret_off = 0;
4371 /* General register restores. */
4372 if (frame_pointer_needed)
4374 offset = local_fsize;
4376 /* If the current function calls __builtin_eh_return, then we need
4377 to restore the saved EH data registers. */
4378 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4380 unsigned int i, regno;
4382 for (i = 0; ; ++i)
4384 regno = EH_RETURN_DATA_REGNO (i);
4385 if (regno == INVALID_REGNUM)
4386 break;
4388 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4389 offset += UNITS_PER_WORD;
4393 for (i = 18; i >= 4; i--)
4394 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4396 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4397 offset += UNITS_PER_WORD;
4400 else
4402 offset = local_fsize - actual_fsize;
4404 /* If the current function calls __builtin_eh_return, then we need
4405 to restore the saved EH data registers. */
4406 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4408 unsigned int i, regno;
4410 for (i = 0; ; ++i)
4412 regno = EH_RETURN_DATA_REGNO (i);
4413 if (regno == INVALID_REGNUM)
4414 break;
4416 /* Only for the first load.
4417 merge_sp_adjust_with_load holds the register load
4418 with which we will merge the sp adjustment. */
4419 if (merge_sp_adjust_with_load == 0
4420 && local_fsize == 0
4421 && VAL_14_BITS_P (-actual_fsize))
4422 merge_sp_adjust_with_load = regno;
4423 else
4424 load_reg (regno, offset, STACK_POINTER_REGNUM);
4425 offset += UNITS_PER_WORD;
4429 for (i = 18; i >= 3; i--)
4431 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4433 /* Only for the first load.
4434 merge_sp_adjust_with_load holds the register load
4435 with which we will merge the sp adjustment. */
4436 if (merge_sp_adjust_with_load == 0
4437 && local_fsize == 0
4438 && VAL_14_BITS_P (-actual_fsize))
4439 merge_sp_adjust_with_load = i;
4440 else
4441 load_reg (i, offset, STACK_POINTER_REGNUM);
4442 offset += UNITS_PER_WORD;
4447 /* Align pointer properly (doubleword boundary). */
4448 offset = (offset + 7) & ~7;
4450 /* FP register restores. */
4451 if (save_fregs)
4453 /* Adjust the register to index off of. */
4454 if (frame_pointer_needed)
4455 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4456 else
4457 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4459 /* Actually do the restores now. */
4460 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4461 if (df_regs_ever_live_p (i)
4462 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4464 rtx src = gen_rtx_MEM (DFmode,
4465 gen_rtx_POST_INC (word_mode, tmpreg));
4466 rtx dest = gen_rtx_REG (DFmode, i);
4467 emit_move_insn (dest, src);
4471 /* Emit a blockage insn here to keep these insns from being moved to
4472 an earlier spot in the epilogue, or into the main instruction stream.
4474 This is necessary as we must not cut the stack back before all the
4475 restores are finished. */
4476 emit_insn (gen_blockage ());
4478 /* Reset stack pointer (and possibly frame pointer). The stack
4479 pointer is initially set to fp + 64 to avoid a race condition. */
4480 if (frame_pointer_needed)
4482 rtx delta = GEN_INT (-64);
4484 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4485 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4486 stack_pointer_rtx, delta));
4488 /* If we were deferring a callee register restore, do it now. */
4489 else if (merge_sp_adjust_with_load)
4491 rtx delta = GEN_INT (-actual_fsize);
4492 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4494 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4496 else if (actual_fsize != 0)
4497 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4498 - actual_fsize, 0);
4500 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4501 frame greater than 8k), do so now. */
4502 if (ret_off != 0)
4503 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4505 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4507 rtx sa = EH_RETURN_STACKADJ_RTX;
4509 emit_insn (gen_blockage ());
4510 emit_insn (TARGET_64BIT
4511 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4512 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4516 bool
4517 pa_can_use_return_insn (void)
4519 if (!reload_completed)
4520 return false;
4522 if (frame_pointer_needed)
4523 return false;
4525 if (df_regs_ever_live_p (2))
4526 return false;
4528 if (crtl->profile)
4529 return false;
4531 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4535 hppa_pic_save_rtx (void)
4537 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4540 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4541 #define NO_DEFERRED_PROFILE_COUNTERS 0
4542 #endif
4545 /* Vector of funcdef numbers. */
4546 static vec<int> funcdef_nos;
4548 /* Output deferred profile counters. */
4549 static void
4550 output_deferred_profile_counters (void)
4552 unsigned int i;
4553 int align, n;
4555 if (funcdef_nos.is_empty ())
4556 return;
4558 switch_to_section (data_section);
4559 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4560 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4562 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4564 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4565 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4568 funcdef_nos.release ();
4571 void
4572 hppa_profile_hook (int label_no)
4574 /* We use SImode for the address of the function in both 32 and
4575 64-bit code to avoid having to provide DImode versions of the
4576 lcla2 and load_offset_label_address insn patterns. */
4577 rtx reg = gen_reg_rtx (SImode);
4578 rtx_code_label *label_rtx = gen_label_rtx ();
4579 rtx mcount = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "_mcount"));
4580 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4581 rtx arg_bytes, begin_label_rtx;
4582 rtx_insn *call_insn;
4583 char begin_label_name[16];
4584 bool use_mcount_pcrel_call;
4586 /* If we can reach _mcount with a pc-relative call, we can optimize
4587 loading the address of the current function. This requires linker
4588 long branch stub support. */
4589 if (!TARGET_PORTABLE_RUNTIME
4590 && !TARGET_LONG_CALLS
4591 && (TARGET_SOM || flag_function_sections))
4592 use_mcount_pcrel_call = TRUE;
4593 else
4594 use_mcount_pcrel_call = FALSE;
4596 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4597 label_no);
4598 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4600 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4602 if (!use_mcount_pcrel_call)
4604 /* The address of the function is loaded into %r25 with an instruction-
4605 relative sequence that avoids the use of relocations. The sequence
4606 is split so that the load_offset_label_address instruction can
4607 occupy the delay slot of the call to _mcount. */
4608 if (TARGET_PA_20)
4609 emit_insn (gen_lcla2 (reg, label_rtx));
4610 else
4611 emit_insn (gen_lcla1 (reg, label_rtx));
4613 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4614 reg,
4615 begin_label_rtx,
4616 label_rtx));
4619 if (!NO_DEFERRED_PROFILE_COUNTERS)
4621 rtx count_label_rtx, addr, r24;
4622 char count_label_name[16];
4624 funcdef_nos.safe_push (label_no);
4625 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4626 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4627 ggc_strdup (count_label_name));
4629 addr = force_reg (Pmode, count_label_rtx);
4630 r24 = gen_rtx_REG (Pmode, 24);
4631 emit_move_insn (r24, addr);
4633 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4634 if (use_mcount_pcrel_call)
4635 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4636 begin_label_rtx));
4637 else
4638 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4640 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4642 else
4644 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4645 if (use_mcount_pcrel_call)
4646 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4647 begin_label_rtx));
4648 else
4649 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4652 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4653 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4655 /* Indicate the _mcount call cannot throw, nor will it execute a
4656 non-local goto. */
4657 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4659 /* Allocate space for fixed arguments. */
4660 if (reg_parm_stack_space > crtl->outgoing_args_size)
4661 crtl->outgoing_args_size = reg_parm_stack_space;
4664 /* Fetch the return address for the frame COUNT steps up from
4665 the current frame, after the prologue. FRAMEADDR is the
4666 frame pointer of the COUNT frame.
4668 We want to ignore any export stub remnants here. To handle this,
4669 we examine the code at the return address, and if it is an export
4670 stub, we return a memory rtx for the stub return address stored
4671 at frame-24.
4673 The value returned is used in two different ways:
4675 1. To find a function's caller.
4677 2. To change the return address for a function.
4679 This function handles most instances of case 1; however, it will
4680 fail if there are two levels of stubs to execute on the return
4681 path. The only way I believe that can happen is if the return value
4682 needs a parameter relocation, which never happens for C code.
4684 This function handles most instances of case 2; however, it will
4685 fail if we did not originally have stub code on the return path
4686 but will need stub code on the new return path. This can happen if
4687 the caller & callee are both in the main program, but the new
4688 return location is in a shared library. */
4691 pa_return_addr_rtx (int count, rtx frameaddr)
4693 rtx label;
4694 rtx rp;
4695 rtx saved_rp;
4696 rtx ins;
4698 /* The instruction stream at the return address of a PA1.X export stub is:
4700 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4701 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4702 0x00011820 | stub+16: mtsp r1,sr0
4703 0xe0400002 | stub+20: be,n 0(sr0,rp)
4705 0xe0400002 must be specified as -532676606 so that it won't be
4706 rejected as an invalid immediate operand on 64-bit hosts.
4708 The instruction stream at the return address of a PA2.0 export stub is:
4710 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4711 0xe840d002 | stub+12: bve,n (rp)
4714 HOST_WIDE_INT insns[4];
4715 int i, len;
4717 if (count != 0)
4718 return NULL_RTX;
4720 rp = get_hard_reg_initial_val (Pmode, 2);
4722 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4723 return rp;
4725 /* If there is no export stub then just use the value saved from
4726 the return pointer register. */
4728 saved_rp = gen_reg_rtx (Pmode);
4729 emit_move_insn (saved_rp, rp);
4731 /* Get pointer to the instruction stream. We have to mask out the
4732 privilege level from the two low order bits of the return address
4733 pointer here so that ins will point to the start of the first
4734 instruction that would have been executed if we returned. */
4735 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4736 label = gen_label_rtx ();
4738 if (TARGET_PA_20)
4740 insns[0] = 0x4bc23fd1;
4741 insns[1] = -398405630;
4742 len = 2;
4744 else
4746 insns[0] = 0x4bc23fd1;
4747 insns[1] = 0x004010a1;
4748 insns[2] = 0x00011820;
4749 insns[3] = -532676606;
4750 len = 4;
4753 /* Check the instruction stream at the normal return address for the
4754 export stub. If it is an export stub, than our return address is
4755 really in -24[frameaddr]. */
4757 for (i = 0; i < len; i++)
4759 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4760 rtx op1 = GEN_INT (insns[i]);
4761 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4764 /* Here we know that our return address points to an export
4765 stub. We don't want to return the address of the export stub,
4766 but rather the return address of the export stub. That return
4767 address is stored at -24[frameaddr]. */
4769 emit_move_insn (saved_rp,
4770 gen_rtx_MEM (Pmode,
4771 memory_address (Pmode,
4772 plus_constant (Pmode, frameaddr,
4773 -24))));
4775 emit_label (label);
4777 return saved_rp;
4780 void
4781 pa_emit_bcond_fp (rtx operands[])
4783 enum rtx_code code = GET_CODE (operands[0]);
4784 rtx operand0 = operands[1];
4785 rtx operand1 = operands[2];
4786 rtx label = operands[3];
4788 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4789 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4791 emit_jump_insn (gen_rtx_SET (pc_rtx,
4792 gen_rtx_IF_THEN_ELSE (VOIDmode,
4793 gen_rtx_fmt_ee (NE,
4794 VOIDmode,
4795 gen_rtx_REG (CCFPmode, 0),
4796 const0_rtx),
4797 gen_rtx_LABEL_REF (VOIDmode, label),
4798 pc_rtx)));
4802 /* Adjust the cost of a scheduling dependency. Return the new cost of
4803 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4805 static int
4806 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4807 unsigned int)
4809 enum attr_type attr_type;
4811 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4812 true dependencies as they are described with bypasses now. */
4813 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4814 return cost;
4816 if (! recog_memoized (insn))
4817 return 0;
4819 attr_type = get_attr_type (insn);
4821 switch (dep_type)
4823 case REG_DEP_ANTI:
4824 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4825 cycles later. */
4827 if (attr_type == TYPE_FPLOAD)
4829 rtx pat = PATTERN (insn);
4830 rtx dep_pat = PATTERN (dep_insn);
4831 if (GET_CODE (pat) == PARALLEL)
4833 /* This happens for the fldXs,mb patterns. */
4834 pat = XVECEXP (pat, 0, 0);
4836 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4837 /* If this happens, we have to extend this to schedule
4838 optimally. Return 0 for now. */
4839 return 0;
4841 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4843 if (! recog_memoized (dep_insn))
4844 return 0;
4845 switch (get_attr_type (dep_insn))
4847 case TYPE_FPALU:
4848 case TYPE_FPMULSGL:
4849 case TYPE_FPMULDBL:
4850 case TYPE_FPDIVSGL:
4851 case TYPE_FPDIVDBL:
4852 case TYPE_FPSQRTSGL:
4853 case TYPE_FPSQRTDBL:
4854 /* A fpload can't be issued until one cycle before a
4855 preceding arithmetic operation has finished if
4856 the target of the fpload is any of the sources
4857 (or destination) of the arithmetic operation. */
4858 return insn_default_latency (dep_insn) - 1;
4860 default:
4861 return 0;
4865 else if (attr_type == TYPE_FPALU)
4867 rtx pat = PATTERN (insn);
4868 rtx dep_pat = PATTERN (dep_insn);
4869 if (GET_CODE (pat) == PARALLEL)
4871 /* This happens for the fldXs,mb patterns. */
4872 pat = XVECEXP (pat, 0, 0);
4874 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4875 /* If this happens, we have to extend this to schedule
4876 optimally. Return 0 for now. */
4877 return 0;
4879 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4881 if (! recog_memoized (dep_insn))
4882 return 0;
4883 switch (get_attr_type (dep_insn))
4885 case TYPE_FPDIVSGL:
4886 case TYPE_FPDIVDBL:
4887 case TYPE_FPSQRTSGL:
4888 case TYPE_FPSQRTDBL:
4889 /* An ALU flop can't be issued until two cycles before a
4890 preceding divide or sqrt operation has finished if
4891 the target of the ALU flop is any of the sources
4892 (or destination) of the divide or sqrt operation. */
4893 return insn_default_latency (dep_insn) - 2;
4895 default:
4896 return 0;
4901 /* For other anti dependencies, the cost is 0. */
4902 return 0;
4904 case REG_DEP_OUTPUT:
4905 /* Output dependency; DEP_INSN writes a register that INSN writes some
4906 cycles later. */
4907 if (attr_type == TYPE_FPLOAD)
4909 rtx pat = PATTERN (insn);
4910 rtx dep_pat = PATTERN (dep_insn);
4911 if (GET_CODE (pat) == PARALLEL)
4913 /* This happens for the fldXs,mb patterns. */
4914 pat = XVECEXP (pat, 0, 0);
4916 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4917 /* If this happens, we have to extend this to schedule
4918 optimally. Return 0 for now. */
4919 return 0;
4921 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4923 if (! recog_memoized (dep_insn))
4924 return 0;
4925 switch (get_attr_type (dep_insn))
4927 case TYPE_FPALU:
4928 case TYPE_FPMULSGL:
4929 case TYPE_FPMULDBL:
4930 case TYPE_FPDIVSGL:
4931 case TYPE_FPDIVDBL:
4932 case TYPE_FPSQRTSGL:
4933 case TYPE_FPSQRTDBL:
4934 /* A fpload can't be issued until one cycle before a
4935 preceding arithmetic operation has finished if
4936 the target of the fpload is the destination of the
4937 arithmetic operation.
4939 Exception: For PA7100LC, PA7200 and PA7300, the cost
4940 is 3 cycles, unless they bundle together. We also
4941 pay the penalty if the second insn is a fpload. */
4942 return insn_default_latency (dep_insn) - 1;
4944 default:
4945 return 0;
4949 else if (attr_type == TYPE_FPALU)
4951 rtx pat = PATTERN (insn);
4952 rtx dep_pat = PATTERN (dep_insn);
4953 if (GET_CODE (pat) == PARALLEL)
4955 /* This happens for the fldXs,mb patterns. */
4956 pat = XVECEXP (pat, 0, 0);
4958 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4959 /* If this happens, we have to extend this to schedule
4960 optimally. Return 0 for now. */
4961 return 0;
4963 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4965 if (! recog_memoized (dep_insn))
4966 return 0;
4967 switch (get_attr_type (dep_insn))
4969 case TYPE_FPDIVSGL:
4970 case TYPE_FPDIVDBL:
4971 case TYPE_FPSQRTSGL:
4972 case TYPE_FPSQRTDBL:
4973 /* An ALU flop can't be issued until two cycles before a
4974 preceding divide or sqrt operation has finished if
4975 the target of the ALU flop is also the target of
4976 the divide or sqrt operation. */
4977 return insn_default_latency (dep_insn) - 2;
4979 default:
4980 return 0;
4985 /* For other output dependencies, the cost is 0. */
4986 return 0;
4988 default:
4989 gcc_unreachable ();
4993 /* Adjust scheduling priorities. We use this to try and keep addil
4994 and the next use of %r1 close together. */
4995 static int
4996 pa_adjust_priority (rtx_insn *insn, int priority)
4998 rtx set = single_set (insn);
4999 rtx src, dest;
5000 if (set)
5002 src = SET_SRC (set);
5003 dest = SET_DEST (set);
5004 if (GET_CODE (src) == LO_SUM
5005 && symbolic_operand (XEXP (src, 1), VOIDmode)
5006 && ! read_only_operand (XEXP (src, 1), VOIDmode))
5007 priority >>= 3;
5009 else if (GET_CODE (src) == MEM
5010 && GET_CODE (XEXP (src, 0)) == LO_SUM
5011 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
5012 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
5013 priority >>= 1;
5015 else if (GET_CODE (dest) == MEM
5016 && GET_CODE (XEXP (dest, 0)) == LO_SUM
5017 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
5018 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
5019 priority >>= 3;
5021 return priority;
5024 /* The 700 can only issue a single insn at a time.
5025 The 7XXX processors can issue two insns at a time.
5026 The 8000 can issue 4 insns at a time. */
5027 static int
5028 pa_issue_rate (void)
5030 switch (pa_cpu)
5032 case PROCESSOR_700: return 1;
5033 case PROCESSOR_7100: return 2;
5034 case PROCESSOR_7100LC: return 2;
5035 case PROCESSOR_7200: return 2;
5036 case PROCESSOR_7300: return 2;
5037 case PROCESSOR_8000: return 4;
5039 default:
5040 gcc_unreachable ();
5046 /* Return any length plus adjustment needed by INSN which already has
5047 its length computed as LENGTH. Return LENGTH if no adjustment is
5048 necessary.
5050 Also compute the length of an inline block move here as it is too
5051 complicated to express as a length attribute in pa.md. */
5053 pa_adjust_insn_length (rtx_insn *insn, int length)
5055 rtx pat = PATTERN (insn);
5057 /* If length is negative or undefined, provide initial length. */
5058 if ((unsigned int) length >= INT_MAX)
5060 if (GET_CODE (pat) == SEQUENCE)
5061 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5063 switch (get_attr_type (insn))
5065 case TYPE_MILLI:
5066 length = pa_attr_length_millicode_call (insn);
5067 break;
5068 case TYPE_CALL:
5069 length = pa_attr_length_call (insn, 0);
5070 break;
5071 case TYPE_SIBCALL:
5072 length = pa_attr_length_call (insn, 1);
5073 break;
5074 case TYPE_DYNCALL:
5075 length = pa_attr_length_indirect_call (insn);
5076 break;
5077 case TYPE_SH_FUNC_ADRS:
5078 length = pa_attr_length_millicode_call (insn) + 20;
5079 break;
5080 default:
5081 gcc_unreachable ();
5085 /* Block move pattern. */
5086 if (NONJUMP_INSN_P (insn)
5087 && GET_CODE (pat) == PARALLEL
5088 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5089 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5090 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5091 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5092 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5093 length += compute_movmem_length (insn) - 4;
5094 /* Block clear pattern. */
5095 else if (NONJUMP_INSN_P (insn)
5096 && GET_CODE (pat) == PARALLEL
5097 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5098 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5099 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5100 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5101 length += compute_clrmem_length (insn) - 4;
5102 /* Conditional branch with an unfilled delay slot. */
5103 else if (JUMP_P (insn) && ! simplejump_p (insn))
5105 /* Adjust a short backwards conditional with an unfilled delay slot. */
5106 if (GET_CODE (pat) == SET
5107 && length == 4
5108 && JUMP_LABEL (insn) != NULL_RTX
5109 && ! forward_branch_p (insn))
5110 length += 4;
5111 else if (GET_CODE (pat) == PARALLEL
5112 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5113 && length == 4)
5114 length += 4;
5115 /* Adjust dbra insn with short backwards conditional branch with
5116 unfilled delay slot -- only for case where counter is in a
5117 general register register. */
5118 else if (GET_CODE (pat) == PARALLEL
5119 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5120 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5121 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5122 && length == 4
5123 && ! forward_branch_p (insn))
5124 length += 4;
5126 return length;
5129 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5131 static bool
5132 pa_print_operand_punct_valid_p (unsigned char code)
5134 if (code == '@'
5135 || code == '#'
5136 || code == '*'
5137 || code == '^')
5138 return true;
5140 return false;
5143 /* Print operand X (an rtx) in assembler syntax to file FILE.
5144 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5145 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5147 void
5148 pa_print_operand (FILE *file, rtx x, int code)
5150 switch (code)
5152 case '#':
5153 /* Output a 'nop' if there's nothing for the delay slot. */
5154 if (dbr_sequence_length () == 0)
5155 fputs ("\n\tnop", file);
5156 return;
5157 case '*':
5158 /* Output a nullification completer if there's nothing for the */
5159 /* delay slot or nullification is requested. */
5160 if (dbr_sequence_length () == 0 ||
5161 (final_sequence &&
5162 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5163 fputs (",n", file);
5164 return;
5165 case 'R':
5166 /* Print out the second register name of a register pair.
5167 I.e., R (6) => 7. */
5168 fputs (reg_names[REGNO (x) + 1], file);
5169 return;
5170 case 'r':
5171 /* A register or zero. */
5172 if (x == const0_rtx
5173 || (x == CONST0_RTX (DFmode))
5174 || (x == CONST0_RTX (SFmode)))
5176 fputs ("%r0", file);
5177 return;
5179 else
5180 break;
5181 case 'f':
5182 /* A register or zero (floating point). */
5183 if (x == const0_rtx
5184 || (x == CONST0_RTX (DFmode))
5185 || (x == CONST0_RTX (SFmode)))
5187 fputs ("%fr0", file);
5188 return;
5190 else
5191 break;
5192 case 'A':
5194 rtx xoperands[2];
5196 xoperands[0] = XEXP (XEXP (x, 0), 0);
5197 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5198 pa_output_global_address (file, xoperands[1], 0);
5199 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5200 return;
5203 case 'C': /* Plain (C)ondition */
5204 case 'X':
5205 switch (GET_CODE (x))
5207 case EQ:
5208 fputs ("=", file); break;
5209 case NE:
5210 fputs ("<>", file); break;
5211 case GT:
5212 fputs (">", file); break;
5213 case GE:
5214 fputs (">=", file); break;
5215 case GEU:
5216 fputs (">>=", file); break;
5217 case GTU:
5218 fputs (">>", file); break;
5219 case LT:
5220 fputs ("<", file); break;
5221 case LE:
5222 fputs ("<=", file); break;
5223 case LEU:
5224 fputs ("<<=", file); break;
5225 case LTU:
5226 fputs ("<<", file); break;
5227 default:
5228 gcc_unreachable ();
5230 return;
5231 case 'N': /* Condition, (N)egated */
5232 switch (GET_CODE (x))
5234 case EQ:
5235 fputs ("<>", file); break;
5236 case NE:
5237 fputs ("=", file); break;
5238 case GT:
5239 fputs ("<=", file); break;
5240 case GE:
5241 fputs ("<", file); break;
5242 case GEU:
5243 fputs ("<<", file); break;
5244 case GTU:
5245 fputs ("<<=", file); break;
5246 case LT:
5247 fputs (">=", file); break;
5248 case LE:
5249 fputs (">", file); break;
5250 case LEU:
5251 fputs (">>", file); break;
5252 case LTU:
5253 fputs (">>=", file); break;
5254 default:
5255 gcc_unreachable ();
5257 return;
5258 /* For floating point comparisons. Note that the output
5259 predicates are the complement of the desired mode. The
5260 conditions for GT, GE, LT, LE and LTGT cause an invalid
5261 operation exception if the result is unordered and this
5262 exception is enabled in the floating-point status register. */
5263 case 'Y':
5264 switch (GET_CODE (x))
5266 case EQ:
5267 fputs ("!=", file); break;
5268 case NE:
5269 fputs ("=", file); break;
5270 case GT:
5271 fputs ("!>", file); break;
5272 case GE:
5273 fputs ("!>=", file); break;
5274 case LT:
5275 fputs ("!<", file); break;
5276 case LE:
5277 fputs ("!<=", file); break;
5278 case LTGT:
5279 fputs ("!<>", file); break;
5280 case UNLE:
5281 fputs ("!?<=", file); break;
5282 case UNLT:
5283 fputs ("!?<", file); break;
5284 case UNGE:
5285 fputs ("!?>=", file); break;
5286 case UNGT:
5287 fputs ("!?>", file); break;
5288 case UNEQ:
5289 fputs ("!?=", file); break;
5290 case UNORDERED:
5291 fputs ("!?", file); break;
5292 case ORDERED:
5293 fputs ("?", file); break;
5294 default:
5295 gcc_unreachable ();
5297 return;
5298 case 'S': /* Condition, operands are (S)wapped. */
5299 switch (GET_CODE (x))
5301 case EQ:
5302 fputs ("=", file); break;
5303 case NE:
5304 fputs ("<>", file); break;
5305 case GT:
5306 fputs ("<", file); break;
5307 case GE:
5308 fputs ("<=", file); break;
5309 case GEU:
5310 fputs ("<<=", file); break;
5311 case GTU:
5312 fputs ("<<", file); break;
5313 case LT:
5314 fputs (">", file); break;
5315 case LE:
5316 fputs (">=", file); break;
5317 case LEU:
5318 fputs (">>=", file); break;
5319 case LTU:
5320 fputs (">>", file); break;
5321 default:
5322 gcc_unreachable ();
5324 return;
5325 case 'B': /* Condition, (B)oth swapped and negate. */
5326 switch (GET_CODE (x))
5328 case EQ:
5329 fputs ("<>", file); break;
5330 case NE:
5331 fputs ("=", file); break;
5332 case GT:
5333 fputs (">=", file); break;
5334 case GE:
5335 fputs (">", file); break;
5336 case GEU:
5337 fputs (">>", file); break;
5338 case GTU:
5339 fputs (">>=", file); break;
5340 case LT:
5341 fputs ("<=", file); break;
5342 case LE:
5343 fputs ("<", file); break;
5344 case LEU:
5345 fputs ("<<", file); break;
5346 case LTU:
5347 fputs ("<<=", file); break;
5348 default:
5349 gcc_unreachable ();
5351 return;
5352 case 'k':
5353 gcc_assert (GET_CODE (x) == CONST_INT);
5354 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5355 return;
5356 case 'Q':
5357 gcc_assert (GET_CODE (x) == CONST_INT);
5358 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5359 return;
5360 case 'L':
5361 gcc_assert (GET_CODE (x) == CONST_INT);
5362 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5363 return;
5364 case 'o':
5365 gcc_assert (GET_CODE (x) == CONST_INT
5366 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5367 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5368 return;
5369 case 'O':
5370 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5371 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5372 return;
5373 case 'p':
5374 gcc_assert (GET_CODE (x) == CONST_INT);
5375 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5376 return;
5377 case 'P':
5378 gcc_assert (GET_CODE (x) == CONST_INT);
5379 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5380 return;
5381 case 'I':
5382 if (GET_CODE (x) == CONST_INT)
5383 fputs ("i", file);
5384 return;
5385 case 'M':
5386 case 'F':
5387 switch (GET_CODE (XEXP (x, 0)))
5389 case PRE_DEC:
5390 case PRE_INC:
5391 if (ASSEMBLER_DIALECT == 0)
5392 fputs ("s,mb", file);
5393 else
5394 fputs (",mb", file);
5395 break;
5396 case POST_DEC:
5397 case POST_INC:
5398 if (ASSEMBLER_DIALECT == 0)
5399 fputs ("s,ma", file);
5400 else
5401 fputs (",ma", file);
5402 break;
5403 case PLUS:
5404 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5405 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5407 if (ASSEMBLER_DIALECT == 0)
5408 fputs ("x", file);
5410 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5411 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5413 if (ASSEMBLER_DIALECT == 0)
5414 fputs ("x,s", file);
5415 else
5416 fputs (",s", file);
5418 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5419 fputs ("s", file);
5420 break;
5421 default:
5422 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5423 fputs ("s", file);
5424 break;
5426 return;
5427 case 'G':
5428 pa_output_global_address (file, x, 0);
5429 return;
5430 case 'H':
5431 pa_output_global_address (file, x, 1);
5432 return;
5433 case 0: /* Don't do anything special */
5434 break;
5435 case 'Z':
5437 unsigned op[3];
5438 compute_zdepwi_operands (INTVAL (x), op);
5439 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5440 return;
5442 case 'z':
5444 unsigned op[3];
5445 compute_zdepdi_operands (INTVAL (x), op);
5446 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5447 return;
5449 case 'c':
5450 /* We can get here from a .vtable_inherit due to our
5451 CONSTANT_ADDRESS_P rejecting perfectly good constant
5452 addresses. */
5453 break;
5454 default:
5455 gcc_unreachable ();
5457 if (GET_CODE (x) == REG)
5459 fputs (reg_names [REGNO (x)], file);
5460 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5462 fputs ("R", file);
5463 return;
5465 if (FP_REG_P (x)
5466 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5467 && (REGNO (x) & 1) == 0)
5468 fputs ("L", file);
5470 else if (GET_CODE (x) == MEM)
5472 int size = GET_MODE_SIZE (GET_MODE (x));
5473 rtx base = NULL_RTX;
5474 switch (GET_CODE (XEXP (x, 0)))
5476 case PRE_DEC:
5477 case POST_DEC:
5478 base = XEXP (XEXP (x, 0), 0);
5479 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5480 break;
5481 case PRE_INC:
5482 case POST_INC:
5483 base = XEXP (XEXP (x, 0), 0);
5484 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5485 break;
5486 case PLUS:
5487 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5488 fprintf (file, "%s(%s)",
5489 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5490 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5491 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5492 fprintf (file, "%s(%s)",
5493 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5494 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5495 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5496 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5498 /* Because the REG_POINTER flag can get lost during reload,
5499 pa_legitimate_address_p canonicalizes the order of the
5500 index and base registers in the combined move patterns. */
5501 rtx base = XEXP (XEXP (x, 0), 1);
5502 rtx index = XEXP (XEXP (x, 0), 0);
5504 fprintf (file, "%s(%s)",
5505 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5507 else
5508 output_address (GET_MODE (x), XEXP (x, 0));
5509 break;
5510 default:
5511 output_address (GET_MODE (x), XEXP (x, 0));
5512 break;
5515 else
5516 output_addr_const (file, x);
5519 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5521 void
5522 pa_output_global_address (FILE *file, rtx x, int round_constant)
5525 /* Imagine (high (const (plus ...))). */
5526 if (GET_CODE (x) == HIGH)
5527 x = XEXP (x, 0);
5529 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5530 output_addr_const (file, x);
5531 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5533 output_addr_const (file, x);
5534 fputs ("-$global$", file);
5536 else if (GET_CODE (x) == CONST)
5538 const char *sep = "";
5539 int offset = 0; /* assembler wants -$global$ at end */
5540 rtx base = NULL_RTX;
5542 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5544 case LABEL_REF:
5545 case SYMBOL_REF:
5546 base = XEXP (XEXP (x, 0), 0);
5547 output_addr_const (file, base);
5548 break;
5549 case CONST_INT:
5550 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5551 break;
5552 default:
5553 gcc_unreachable ();
5556 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5558 case LABEL_REF:
5559 case SYMBOL_REF:
5560 base = XEXP (XEXP (x, 0), 1);
5561 output_addr_const (file, base);
5562 break;
5563 case CONST_INT:
5564 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5565 break;
5566 default:
5567 gcc_unreachable ();
5570 /* How bogus. The compiler is apparently responsible for
5571 rounding the constant if it uses an LR field selector.
5573 The linker and/or assembler seem a better place since
5574 they have to do this kind of thing already.
5576 If we fail to do this, HP's optimizing linker may eliminate
5577 an addil, but not update the ldw/stw/ldo instruction that
5578 uses the result of the addil. */
5579 if (round_constant)
5580 offset = ((offset + 0x1000) & ~0x1fff);
5582 switch (GET_CODE (XEXP (x, 0)))
5584 case PLUS:
5585 if (offset < 0)
5587 offset = -offset;
5588 sep = "-";
5590 else
5591 sep = "+";
5592 break;
5594 case MINUS:
5595 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5596 sep = "-";
5597 break;
5599 default:
5600 gcc_unreachable ();
5603 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5604 fputs ("-$global$", file);
5605 if (offset)
5606 fprintf (file, "%s%d", sep, offset);
5608 else
5609 output_addr_const (file, x);
5612 /* Output boilerplate text to appear at the beginning of the file.
5613 There are several possible versions. */
5614 #define aputs(x) fputs(x, asm_out_file)
5615 static inline void
5616 pa_file_start_level (void)
5618 if (TARGET_64BIT)
5619 aputs ("\t.LEVEL 2.0w\n");
5620 else if (TARGET_PA_20)
5621 aputs ("\t.LEVEL 2.0\n");
5622 else if (TARGET_PA_11)
5623 aputs ("\t.LEVEL 1.1\n");
5624 else
5625 aputs ("\t.LEVEL 1.0\n");
5628 static inline void
5629 pa_file_start_space (int sortspace)
5631 aputs ("\t.SPACE $PRIVATE$");
5632 if (sortspace)
5633 aputs (",SORT=16");
5634 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5635 if (flag_tm)
5636 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5637 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5638 "\n\t.SPACE $TEXT$");
5639 if (sortspace)
5640 aputs (",SORT=8");
5641 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5642 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5645 static inline void
5646 pa_file_start_file (int want_version)
5648 if (write_symbols != NO_DEBUG)
5650 output_file_directive (asm_out_file, main_input_filename);
5651 if (want_version)
5652 aputs ("\t.version\t\"01.01\"\n");
5656 static inline void
5657 pa_file_start_mcount (const char *aswhat)
5659 if (profile_flag)
5660 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5663 static void
5664 pa_elf_file_start (void)
5666 pa_file_start_level ();
5667 pa_file_start_mcount ("ENTRY");
5668 pa_file_start_file (0);
5671 static void
5672 pa_som_file_start (void)
5674 pa_file_start_level ();
5675 pa_file_start_space (0);
5676 aputs ("\t.IMPORT $global$,DATA\n"
5677 "\t.IMPORT $$dyncall,MILLICODE\n");
5678 pa_file_start_mcount ("CODE");
5679 pa_file_start_file (0);
5682 static void
5683 pa_linux_file_start (void)
5685 pa_file_start_file (1);
5686 pa_file_start_level ();
5687 pa_file_start_mcount ("CODE");
5690 static void
5691 pa_hpux64_gas_file_start (void)
5693 pa_file_start_level ();
5694 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5695 if (profile_flag)
5696 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5697 #endif
5698 pa_file_start_file (1);
5701 static void
5702 pa_hpux64_hpas_file_start (void)
5704 pa_file_start_level ();
5705 pa_file_start_space (1);
5706 pa_file_start_mcount ("CODE");
5707 pa_file_start_file (0);
5709 #undef aputs
5711 /* Search the deferred plabel list for SYMBOL and return its internal
5712 label. If an entry for SYMBOL is not found, a new entry is created. */
5715 pa_get_deferred_plabel (rtx symbol)
5717 const char *fname = XSTR (symbol, 0);
5718 size_t i;
5720 /* See if we have already put this function on the list of deferred
5721 plabels. This list is generally small, so a liner search is not
5722 too ugly. If it proves too slow replace it with something faster. */
5723 for (i = 0; i < n_deferred_plabels; i++)
5724 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5725 break;
5727 /* If the deferred plabel list is empty, or this entry was not found
5728 on the list, create a new entry on the list. */
5729 if (deferred_plabels == NULL || i == n_deferred_plabels)
5731 tree id;
5733 if (deferred_plabels == 0)
5734 deferred_plabels = ggc_alloc<deferred_plabel> ();
5735 else
5736 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5737 deferred_plabels,
5738 n_deferred_plabels + 1);
5740 i = n_deferred_plabels++;
5741 deferred_plabels[i].internal_label = gen_label_rtx ();
5742 deferred_plabels[i].symbol = symbol;
5744 /* Gross. We have just implicitly taken the address of this
5745 function. Mark it in the same manner as assemble_name. */
5746 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5747 if (id)
5748 mark_referenced (id);
5751 return deferred_plabels[i].internal_label;
5754 static void
5755 output_deferred_plabels (void)
5757 size_t i;
5759 /* If we have some deferred plabels, then we need to switch into the
5760 data or readonly data section, and align it to a 4 byte boundary
5761 before outputting the deferred plabels. */
5762 if (n_deferred_plabels)
5764 switch_to_section (flag_pic ? data_section : readonly_data_section);
5765 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5768 /* Now output the deferred plabels. */
5769 for (i = 0; i < n_deferred_plabels; i++)
5771 targetm.asm_out.internal_label (asm_out_file, "L",
5772 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5773 assemble_integer (deferred_plabels[i].symbol,
5774 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5778 /* Initialize optabs to point to emulation routines. */
5780 static void
5781 pa_init_libfuncs (void)
5783 if (HPUX_LONG_DOUBLE_LIBRARY)
5785 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5786 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5787 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5788 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5789 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5790 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5791 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5792 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5793 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5795 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5796 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5797 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5798 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5799 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5800 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5801 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5803 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5804 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5805 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5806 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5808 set_conv_libfunc (sfix_optab, SImode, TFmode,
5809 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5810 : "_U_Qfcnvfxt_quad_to_sgl");
5811 set_conv_libfunc (sfix_optab, DImode, TFmode,
5812 "_U_Qfcnvfxt_quad_to_dbl");
5813 set_conv_libfunc (ufix_optab, SImode, TFmode,
5814 "_U_Qfcnvfxt_quad_to_usgl");
5815 set_conv_libfunc (ufix_optab, DImode, TFmode,
5816 "_U_Qfcnvfxt_quad_to_udbl");
5818 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5819 "_U_Qfcnvxf_sgl_to_quad");
5820 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5821 "_U_Qfcnvxf_dbl_to_quad");
5822 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5823 "_U_Qfcnvxf_usgl_to_quad");
5824 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5825 "_U_Qfcnvxf_udbl_to_quad");
5828 if (TARGET_SYNC_LIBCALL)
5829 init_sync_libfuncs (8);
5832 /* HP's millicode routines mean something special to the assembler.
5833 Keep track of which ones we have used. */
5835 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5836 static void import_milli (enum millicodes);
5837 static char imported[(int) end1000];
5838 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5839 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5840 #define MILLI_START 10
5842 static void
5843 import_milli (enum millicodes code)
5845 char str[sizeof (import_string)];
5847 if (!imported[(int) code])
5849 imported[(int) code] = 1;
5850 strcpy (str, import_string);
5851 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5852 output_asm_insn (str, 0);
5856 /* The register constraints have put the operands and return value in
5857 the proper registers. */
5859 const char *
5860 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5862 import_milli (mulI);
5863 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5866 /* Emit the rtl for doing a division by a constant. */
5868 /* Do magic division millicodes exist for this value? */
5869 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5871 /* We'll use an array to keep track of the magic millicodes and
5872 whether or not we've used them already. [n][0] is signed, [n][1] is
5873 unsigned. */
5875 static int div_milli[16][2];
5878 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5880 if (GET_CODE (operands[2]) == CONST_INT
5881 && INTVAL (operands[2]) > 0
5882 && INTVAL (operands[2]) < 16
5883 && pa_magic_milli[INTVAL (operands[2])])
5885 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5887 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5888 emit
5889 (gen_rtx_PARALLEL
5890 (VOIDmode,
5891 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5892 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5893 SImode,
5894 gen_rtx_REG (SImode, 26),
5895 operands[2])),
5896 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5897 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5898 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5899 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5900 gen_rtx_CLOBBER (VOIDmode, ret))));
5901 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5902 return 1;
5904 return 0;
5907 const char *
5908 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5910 int divisor;
5912 /* If the divisor is a constant, try to use one of the special
5913 opcodes .*/
5914 if (GET_CODE (operands[0]) == CONST_INT)
5916 static char buf[100];
5917 divisor = INTVAL (operands[0]);
5918 if (!div_milli[divisor][unsignedp])
5920 div_milli[divisor][unsignedp] = 1;
5921 if (unsignedp)
5922 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5923 else
5924 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5926 if (unsignedp)
5928 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5929 INTVAL (operands[0]));
5930 return pa_output_millicode_call (insn,
5931 gen_rtx_SYMBOL_REF (SImode, buf));
5933 else
5935 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5936 INTVAL (operands[0]));
5937 return pa_output_millicode_call (insn,
5938 gen_rtx_SYMBOL_REF (SImode, buf));
5941 /* Divisor isn't a special constant. */
5942 else
5944 if (unsignedp)
5946 import_milli (divU);
5947 return pa_output_millicode_call (insn,
5948 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5950 else
5952 import_milli (divI);
5953 return pa_output_millicode_call (insn,
5954 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5959 /* Output a $$rem millicode to do mod. */
5961 const char *
5962 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5964 if (unsignedp)
5966 import_milli (remU);
5967 return pa_output_millicode_call (insn,
5968 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5970 else
5972 import_milli (remI);
5973 return pa_output_millicode_call (insn,
5974 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5978 void
5979 pa_output_arg_descriptor (rtx_insn *call_insn)
5981 const char *arg_regs[4];
5982 machine_mode arg_mode;
5983 rtx link;
5984 int i, output_flag = 0;
5985 int regno;
5987 /* We neither need nor want argument location descriptors for the
5988 64bit runtime environment or the ELF32 environment. */
5989 if (TARGET_64BIT || TARGET_ELF32)
5990 return;
5992 for (i = 0; i < 4; i++)
5993 arg_regs[i] = 0;
5995 /* Specify explicitly that no argument relocations should take place
5996 if using the portable runtime calling conventions. */
5997 if (TARGET_PORTABLE_RUNTIME)
5999 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
6000 asm_out_file);
6001 return;
6004 gcc_assert (CALL_P (call_insn));
6005 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
6006 link; link = XEXP (link, 1))
6008 rtx use = XEXP (link, 0);
6010 if (! (GET_CODE (use) == USE
6011 && GET_CODE (XEXP (use, 0)) == REG
6012 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6013 continue;
6015 arg_mode = GET_MODE (XEXP (use, 0));
6016 regno = REGNO (XEXP (use, 0));
6017 if (regno >= 23 && regno <= 26)
6019 arg_regs[26 - regno] = "GR";
6020 if (arg_mode == DImode)
6021 arg_regs[25 - regno] = "GR";
6023 else if (regno >= 32 && regno <= 39)
6025 if (arg_mode == SFmode)
6026 arg_regs[(regno - 32) / 2] = "FR";
6027 else
6029 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
6030 arg_regs[(regno - 34) / 2] = "FR";
6031 arg_regs[(regno - 34) / 2 + 1] = "FU";
6032 #else
6033 arg_regs[(regno - 34) / 2] = "FU";
6034 arg_regs[(regno - 34) / 2 + 1] = "FR";
6035 #endif
6039 fputs ("\t.CALL ", asm_out_file);
6040 for (i = 0; i < 4; i++)
6042 if (arg_regs[i])
6044 if (output_flag++)
6045 fputc (',', asm_out_file);
6046 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6049 fputc ('\n', asm_out_file);
6052 /* Inform reload about cases where moving X with a mode MODE to or from
6053 a register in RCLASS requires an extra scratch or immediate register.
6054 Return the class needed for the immediate register. */
6056 static reg_class_t
6057 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6058 machine_mode mode, secondary_reload_info *sri)
6060 int regno;
6061 enum reg_class rclass = (enum reg_class) rclass_i;
6063 /* Handle the easy stuff first. */
6064 if (rclass == R1_REGS)
6065 return NO_REGS;
6067 if (REG_P (x))
6069 regno = REGNO (x);
6070 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6071 return NO_REGS;
6073 else
6074 regno = -1;
6076 /* If we have something like (mem (mem (...)), we can safely assume the
6077 inner MEM will end up in a general register after reloading, so there's
6078 no need for a secondary reload. */
6079 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6080 return NO_REGS;
6082 /* Trying to load a constant into a FP register during PIC code
6083 generation requires %r1 as a scratch register. For float modes,
6084 the only legitimate constant is CONST0_RTX. However, there are
6085 a few patterns that accept constant double operands. */
6086 if (flag_pic
6087 && FP_REG_CLASS_P (rclass)
6088 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6090 switch (mode)
6092 case E_SImode:
6093 sri->icode = CODE_FOR_reload_insi_r1;
6094 break;
6096 case E_DImode:
6097 sri->icode = CODE_FOR_reload_indi_r1;
6098 break;
6100 case E_SFmode:
6101 sri->icode = CODE_FOR_reload_insf_r1;
6102 break;
6104 case E_DFmode:
6105 sri->icode = CODE_FOR_reload_indf_r1;
6106 break;
6108 default:
6109 gcc_unreachable ();
6111 return NO_REGS;
6114 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6115 register when we're generating PIC code or when the operand isn't
6116 readonly. */
6117 if (pa_symbolic_expression_p (x))
6119 if (GET_CODE (x) == HIGH)
6120 x = XEXP (x, 0);
6122 if (flag_pic || !read_only_operand (x, VOIDmode))
6124 switch (mode)
6126 case E_SImode:
6127 sri->icode = CODE_FOR_reload_insi_r1;
6128 break;
6130 case E_DImode:
6131 sri->icode = CODE_FOR_reload_indi_r1;
6132 break;
6134 default:
6135 gcc_unreachable ();
6137 return NO_REGS;
6141 /* Profiling showed the PA port spends about 1.3% of its compilation
6142 time in true_regnum from calls inside pa_secondary_reload_class. */
6143 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6144 regno = true_regnum (x);
6146 /* Handle reloads for floating point loads and stores. */
6147 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6148 && FP_REG_CLASS_P (rclass))
6150 if (MEM_P (x))
6152 x = XEXP (x, 0);
6154 /* We don't need a secondary reload for indexed memory addresses.
6156 When INT14_OK_STRICT is true, it might appear that we could
6157 directly allow register indirect memory addresses. However,
6158 this doesn't work because we don't support SUBREGs in
6159 floating-point register copies and reload doesn't tell us
6160 when it's going to use a SUBREG. */
6161 if (IS_INDEX_ADDR_P (x))
6162 return NO_REGS;
6165 /* Request a secondary reload with a general scratch register
6166 for everything else. ??? Could symbolic operands be handled
6167 directly when generating non-pic PA 2.0 code? */
6168 sri->icode = (in_p
6169 ? direct_optab_handler (reload_in_optab, mode)
6170 : direct_optab_handler (reload_out_optab, mode));
6171 return NO_REGS;
6174 /* A SAR<->FP register copy requires an intermediate general register
6175 and secondary memory. We need a secondary reload with a general
6176 scratch register for spills. */
6177 if (rclass == SHIFT_REGS)
6179 /* Handle spill. */
6180 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6182 sri->icode = (in_p
6183 ? direct_optab_handler (reload_in_optab, mode)
6184 : direct_optab_handler (reload_out_optab, mode));
6185 return NO_REGS;
6188 /* Handle FP copy. */
6189 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6190 return GENERAL_REGS;
6193 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6194 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6195 && FP_REG_CLASS_P (rclass))
6196 return GENERAL_REGS;
6198 return NO_REGS;
6201 /* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
6203 static bool
6204 pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED,
6205 reg_class_t class1 ATTRIBUTE_UNUSED,
6206 reg_class_t class2 ATTRIBUTE_UNUSED)
6208 #ifdef PA_SECONDARY_MEMORY_NEEDED
6209 return PA_SECONDARY_MEMORY_NEEDED (mode, class1, class2);
6210 #else
6211 return false;
6212 #endif
6215 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6216 is only marked as live on entry by df-scan when it is a fixed
6217 register. It isn't a fixed register in the 64-bit runtime,
6218 so we need to mark it here. */
6220 static void
6221 pa_extra_live_on_entry (bitmap regs)
6223 if (TARGET_64BIT)
6224 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6227 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6228 to prevent it from being deleted. */
6231 pa_eh_return_handler_rtx (void)
6233 rtx tmp;
6235 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6236 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6237 tmp = gen_rtx_MEM (word_mode, tmp);
6238 tmp->volatil = 1;
6239 return tmp;
6242 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6243 by invisible reference. As a GCC extension, we also pass anything
6244 with a zero or variable size by reference.
6246 The 64-bit runtime does not describe passing any types by invisible
6247 reference. The internals of GCC can't currently handle passing
6248 empty structures, and zero or variable length arrays when they are
6249 not passed entirely on the stack or by reference. Thus, as a GCC
6250 extension, we pass these types by reference. The HP compiler doesn't
6251 support these types, so hopefully there shouldn't be any compatibility
6252 issues. This may have to be revisited when HP releases a C99 compiler
6253 or updates the ABI. */
6255 static bool
6256 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6257 machine_mode mode, const_tree type,
6258 bool named ATTRIBUTE_UNUSED)
6260 HOST_WIDE_INT size;
6262 if (type)
6263 size = int_size_in_bytes (type);
6264 else
6265 size = GET_MODE_SIZE (mode);
6267 if (TARGET_64BIT)
6268 return size <= 0;
6269 else
6270 return size <= 0 || size > 8;
6273 /* Implement TARGET_FUNCTION_ARG_PADDING. */
6275 static pad_direction
6276 pa_function_arg_padding (machine_mode mode, const_tree type)
6278 if (mode == BLKmode
6279 || (TARGET_64BIT
6280 && type
6281 && (AGGREGATE_TYPE_P (type)
6282 || TREE_CODE (type) == COMPLEX_TYPE
6283 || TREE_CODE (type) == VECTOR_TYPE)))
6285 /* Return PAD_NONE if justification is not required. */
6286 if (type
6287 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6288 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6289 return PAD_NONE;
6291 /* The directions set here are ignored when a BLKmode argument larger
6292 than a word is placed in a register. Different code is used for
6293 the stack and registers. This makes it difficult to have a
6294 consistent data representation for both the stack and registers.
6295 For both runtimes, the justification and padding for arguments on
6296 the stack and in registers should be identical. */
6297 if (TARGET_64BIT)
6298 /* The 64-bit runtime specifies left justification for aggregates. */
6299 return PAD_UPWARD;
6300 else
6301 /* The 32-bit runtime architecture specifies right justification.
6302 When the argument is passed on the stack, the argument is padded
6303 with garbage on the left. The HP compiler pads with zeros. */
6304 return PAD_DOWNWARD;
6307 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6308 return PAD_DOWNWARD;
6309 else
6310 return PAD_NONE;
6314 /* Do what is necessary for `va_start'. We look at the current function
6315 to determine if stdargs or varargs is used and fill in an initial
6316 va_list. A pointer to this constructor is returned. */
6318 static rtx
6319 hppa_builtin_saveregs (void)
6321 rtx offset, dest;
6322 tree fntype = TREE_TYPE (current_function_decl);
6323 int argadj = ((!stdarg_p (fntype))
6324 ? UNITS_PER_WORD : 0);
6326 if (argadj)
6327 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6328 else
6329 offset = crtl->args.arg_offset_rtx;
6331 if (TARGET_64BIT)
6333 int i, off;
6335 /* Adjust for varargs/stdarg differences. */
6336 if (argadj)
6337 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6338 else
6339 offset = crtl->args.arg_offset_rtx;
6341 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6342 from the incoming arg pointer and growing to larger addresses. */
6343 for (i = 26, off = -64; i >= 19; i--, off += 8)
6344 emit_move_insn (gen_rtx_MEM (word_mode,
6345 plus_constant (Pmode,
6346 arg_pointer_rtx, off)),
6347 gen_rtx_REG (word_mode, i));
6349 /* The incoming args pointer points just beyond the flushback area;
6350 normally this is not a serious concern. However, when we are doing
6351 varargs/stdargs we want to make the arg pointer point to the start
6352 of the incoming argument area. */
6353 emit_move_insn (virtual_incoming_args_rtx,
6354 plus_constant (Pmode, arg_pointer_rtx, -64));
6356 /* Now return a pointer to the first anonymous argument. */
6357 return copy_to_reg (expand_binop (Pmode, add_optab,
6358 virtual_incoming_args_rtx,
6359 offset, 0, 0, OPTAB_LIB_WIDEN));
6362 /* Store general registers on the stack. */
6363 dest = gen_rtx_MEM (BLKmode,
6364 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6365 -16));
6366 set_mem_alias_set (dest, get_varargs_alias_set ());
6367 set_mem_align (dest, BITS_PER_WORD);
6368 move_block_from_reg (23, dest, 4);
6370 /* move_block_from_reg will emit code to store the argument registers
6371 individually as scalar stores.
6373 However, other insns may later load from the same addresses for
6374 a structure load (passing a struct to a varargs routine).
6376 The alias code assumes that such aliasing can never happen, so we
6377 have to keep memory referencing insns from moving up beyond the
6378 last argument register store. So we emit a blockage insn here. */
6379 emit_insn (gen_blockage ());
6381 return copy_to_reg (expand_binop (Pmode, add_optab,
6382 crtl->args.internal_arg_pointer,
6383 offset, 0, 0, OPTAB_LIB_WIDEN));
6386 static void
6387 hppa_va_start (tree valist, rtx nextarg)
6389 nextarg = expand_builtin_saveregs ();
6390 std_expand_builtin_va_start (valist, nextarg);
6393 static tree
6394 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6395 gimple_seq *post_p)
6397 if (TARGET_64BIT)
6399 /* Args grow upward. We can use the generic routines. */
6400 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6402 else /* !TARGET_64BIT */
6404 tree ptr = build_pointer_type (type);
6405 tree valist_type;
6406 tree t, u;
6407 unsigned int size, ofs;
6408 bool indirect;
6410 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6411 if (indirect)
6413 type = ptr;
6414 ptr = build_pointer_type (type);
6416 size = int_size_in_bytes (type);
6417 valist_type = TREE_TYPE (valist);
6419 /* Args grow down. Not handled by generic routines. */
6421 u = fold_convert (sizetype, size_in_bytes (type));
6422 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6423 t = fold_build_pointer_plus (valist, u);
6425 /* Align to 4 or 8 byte boundary depending on argument size. */
6427 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6428 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6429 t = fold_convert (valist_type, t);
6431 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6433 ofs = (8 - size) % 4;
6434 if (ofs != 0)
6435 t = fold_build_pointer_plus_hwi (t, ofs);
6437 t = fold_convert (ptr, t);
6438 t = build_va_arg_indirect_ref (t);
6440 if (indirect)
6441 t = build_va_arg_indirect_ref (t);
6443 return t;
6447 /* True if MODE is valid for the target. By "valid", we mean able to
6448 be manipulated in non-trivial ways. In particular, this means all
6449 the arithmetic is supported.
6451 Currently, TImode is not valid as the HP 64-bit runtime documentation
6452 doesn't document the alignment and calling conventions for this type.
6453 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6454 2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE. */
6456 static bool
6457 pa_scalar_mode_supported_p (scalar_mode mode)
6459 int precision = GET_MODE_PRECISION (mode);
6461 switch (GET_MODE_CLASS (mode))
6463 case MODE_PARTIAL_INT:
6464 case MODE_INT:
6465 if (precision == CHAR_TYPE_SIZE)
6466 return true;
6467 if (precision == SHORT_TYPE_SIZE)
6468 return true;
6469 if (precision == INT_TYPE_SIZE)
6470 return true;
6471 if (precision == LONG_TYPE_SIZE)
6472 return true;
6473 if (precision == LONG_LONG_TYPE_SIZE)
6474 return true;
6475 return false;
6477 case MODE_FLOAT:
6478 if (precision == FLOAT_TYPE_SIZE)
6479 return true;
6480 if (precision == DOUBLE_TYPE_SIZE)
6481 return true;
6482 if (precision == LONG_DOUBLE_TYPE_SIZE)
6483 return true;
6484 return false;
6486 case MODE_DECIMAL_FLOAT:
6487 return false;
6489 default:
6490 gcc_unreachable ();
6494 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6495 it branches into the delay slot. Otherwise, return FALSE. */
6497 static bool
6498 branch_to_delay_slot_p (rtx_insn *insn)
6500 rtx_insn *jump_insn;
6502 if (dbr_sequence_length ())
6503 return FALSE;
6505 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6506 while (insn)
6508 insn = next_active_insn (insn);
6509 if (jump_insn == insn)
6510 return TRUE;
6512 /* We can't rely on the length of asms. So, we return FALSE when
6513 the branch is followed by an asm. */
6514 if (!insn
6515 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6516 || asm_noperands (PATTERN (insn)) >= 0
6517 || get_attr_length (insn) > 0)
6518 break;
6521 return FALSE;
6524 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6526 This occurs when INSN has an unfilled delay slot and is followed
6527 by an asm. Disaster can occur if the asm is empty and the jump
6528 branches into the delay slot. So, we add a nop in the delay slot
6529 when this occurs. */
6531 static bool
6532 branch_needs_nop_p (rtx_insn *insn)
6534 rtx_insn *jump_insn;
6536 if (dbr_sequence_length ())
6537 return FALSE;
6539 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6540 while (insn)
6542 insn = next_active_insn (insn);
6543 if (!insn || jump_insn == insn)
6544 return TRUE;
6546 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6547 || asm_noperands (PATTERN (insn)) >= 0)
6548 && get_attr_length (insn) > 0)
6549 break;
6552 return FALSE;
6555 /* Return TRUE if INSN, a forward jump insn, can use nullification
6556 to skip the following instruction. This avoids an extra cycle due
6557 to a mis-predicted branch when we fall through. */
6559 static bool
6560 use_skip_p (rtx_insn *insn)
6562 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6564 while (insn)
6566 insn = next_active_insn (insn);
6568 /* We can't rely on the length of asms, so we can't skip asms. */
6569 if (!insn
6570 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6571 || asm_noperands (PATTERN (insn)) >= 0)
6572 break;
6573 if (get_attr_length (insn) == 4
6574 && jump_insn == next_active_insn (insn))
6575 return TRUE;
6576 if (get_attr_length (insn) > 0)
6577 break;
6580 return FALSE;
6583 /* This routine handles all the normal conditional branch sequences we
6584 might need to generate. It handles compare immediate vs compare
6585 register, nullification of delay slots, varying length branches,
6586 negated branches, and all combinations of the above. It returns the
6587 output appropriate to emit the branch corresponding to all given
6588 parameters. */
6590 const char *
6591 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6593 static char buf[100];
6594 bool useskip;
6595 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6596 int length = get_attr_length (insn);
6597 int xdelay;
6599 /* A conditional branch to the following instruction (e.g. the delay slot)
6600 is asking for a disaster. This can happen when not optimizing and
6601 when jump optimization fails.
6603 While it is usually safe to emit nothing, this can fail if the
6604 preceding instruction is a nullified branch with an empty delay
6605 slot and the same branch target as this branch. We could check
6606 for this but jump optimization should eliminate nop jumps. It
6607 is always safe to emit a nop. */
6608 if (branch_to_delay_slot_p (insn))
6609 return "nop";
6611 /* The doubleword form of the cmpib instruction doesn't have the LEU
6612 and GTU conditions while the cmpb instruction does. Since we accept
6613 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6614 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6615 operands[2] = gen_rtx_REG (DImode, 0);
6616 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6617 operands[1] = gen_rtx_REG (DImode, 0);
6619 /* If this is a long branch with its delay slot unfilled, set `nullify'
6620 as it can nullify the delay slot and save a nop. */
6621 if (length == 8 && dbr_sequence_length () == 0)
6622 nullify = 1;
6624 /* If this is a short forward conditional branch which did not get
6625 its delay slot filled, the delay slot can still be nullified. */
6626 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6627 nullify = forward_branch_p (insn);
6629 /* A forward branch over a single nullified insn can be done with a
6630 comclr instruction. This avoids a single cycle penalty due to
6631 mis-predicted branch if we fall through (branch not taken). */
6632 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6634 switch (length)
6636 /* All short conditional branches except backwards with an unfilled
6637 delay slot. */
6638 case 4:
6639 if (useskip)
6640 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6641 else
6642 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6643 if (GET_MODE (operands[1]) == DImode)
6644 strcat (buf, "*");
6645 if (negated)
6646 strcat (buf, "%B3");
6647 else
6648 strcat (buf, "%S3");
6649 if (useskip)
6650 strcat (buf, " %2,%r1,%%r0");
6651 else if (nullify)
6653 if (branch_needs_nop_p (insn))
6654 strcat (buf, ",n %2,%r1,%0%#");
6655 else
6656 strcat (buf, ",n %2,%r1,%0");
6658 else
6659 strcat (buf, " %2,%r1,%0");
6660 break;
6662 /* All long conditionals. Note a short backward branch with an
6663 unfilled delay slot is treated just like a long backward branch
6664 with an unfilled delay slot. */
6665 case 8:
6666 /* Handle weird backwards branch with a filled delay slot
6667 which is nullified. */
6668 if (dbr_sequence_length () != 0
6669 && ! forward_branch_p (insn)
6670 && nullify)
6672 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6673 if (GET_MODE (operands[1]) == DImode)
6674 strcat (buf, "*");
6675 if (negated)
6676 strcat (buf, "%S3");
6677 else
6678 strcat (buf, "%B3");
6679 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6681 /* Handle short backwards branch with an unfilled delay slot.
6682 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6683 taken and untaken branches. */
6684 else if (dbr_sequence_length () == 0
6685 && ! forward_branch_p (insn)
6686 && INSN_ADDRESSES_SET_P ()
6687 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6688 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6690 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6691 if (GET_MODE (operands[1]) == DImode)
6692 strcat (buf, "*");
6693 if (negated)
6694 strcat (buf, "%B3 %2,%r1,%0%#");
6695 else
6696 strcat (buf, "%S3 %2,%r1,%0%#");
6698 else
6700 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6701 if (GET_MODE (operands[1]) == DImode)
6702 strcat (buf, "*");
6703 if (negated)
6704 strcat (buf, "%S3");
6705 else
6706 strcat (buf, "%B3");
6707 if (nullify)
6708 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6709 else
6710 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6712 break;
6714 default:
6715 /* The reversed conditional branch must branch over one additional
6716 instruction if the delay slot is filled and needs to be extracted
6717 by pa_output_lbranch. If the delay slot is empty or this is a
6718 nullified forward branch, the instruction after the reversed
6719 condition branch must be nullified. */
6720 if (dbr_sequence_length () == 0
6721 || (nullify && forward_branch_p (insn)))
6723 nullify = 1;
6724 xdelay = 0;
6725 operands[4] = GEN_INT (length);
6727 else
6729 xdelay = 1;
6730 operands[4] = GEN_INT (length + 4);
6733 /* Create a reversed conditional branch which branches around
6734 the following insns. */
6735 if (GET_MODE (operands[1]) != DImode)
6737 if (nullify)
6739 if (negated)
6740 strcpy (buf,
6741 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6742 else
6743 strcpy (buf,
6744 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6746 else
6748 if (negated)
6749 strcpy (buf,
6750 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6751 else
6752 strcpy (buf,
6753 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6756 else
6758 if (nullify)
6760 if (negated)
6761 strcpy (buf,
6762 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6763 else
6764 strcpy (buf,
6765 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6767 else
6769 if (negated)
6770 strcpy (buf,
6771 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6772 else
6773 strcpy (buf,
6774 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6778 output_asm_insn (buf, operands);
6779 return pa_output_lbranch (operands[0], insn, xdelay);
6781 return buf;
6784 /* Output a PIC pc-relative instruction sequence to load the address of
6785 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6786 or a code label. OPERANDS[1] specifies the register to use to load
6787 the program counter. OPERANDS[3] may be used for label generation
6788 The sequence is always three instructions in length. The program
6789 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6790 Register %r1 is clobbered. */
6792 static void
6793 pa_output_pic_pcrel_sequence (rtx *operands)
6795 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6796 if (TARGET_PA_20)
6798 /* We can use mfia to determine the current program counter. */
6799 if (TARGET_SOM || !TARGET_GAS)
6801 operands[3] = gen_label_rtx ();
6802 targetm.asm_out.internal_label (asm_out_file, "L",
6803 CODE_LABEL_NUMBER (operands[3]));
6804 output_asm_insn ("mfia %1", operands);
6805 output_asm_insn ("addil L'%0-%l3,%1", operands);
6806 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6808 else
6810 output_asm_insn ("mfia %1", operands);
6811 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6812 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6815 else
6817 /* We need to use a branch to determine the current program counter. */
6818 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6819 if (TARGET_SOM || !TARGET_GAS)
6821 operands[3] = gen_label_rtx ();
6822 output_asm_insn ("addil L'%0-%l3,%1", operands);
6823 targetm.asm_out.internal_label (asm_out_file, "L",
6824 CODE_LABEL_NUMBER (operands[3]));
6825 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6827 else
6829 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6830 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
6835 /* This routine handles output of long unconditional branches that
6836 exceed the maximum range of a simple branch instruction. Since
6837 we don't have a register available for the branch, we save register
6838 %r1 in the frame marker, load the branch destination DEST into %r1,
6839 execute the branch, and restore %r1 in the delay slot of the branch.
6841 Since long branches may have an insn in the delay slot and the
6842 delay slot is used to restore %r1, we in general need to extract
6843 this insn and execute it before the branch. However, to facilitate
6844 use of this function by conditional branches, we also provide an
6845 option to not extract the delay insn so that it will be emitted
6846 after the long branch. So, if there is an insn in the delay slot,
6847 it is extracted if XDELAY is nonzero.
6849 The lengths of the various long-branch sequences are 20, 16 and 24
6850 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6852 const char *
6853 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6855 rtx xoperands[4];
6857 xoperands[0] = dest;
6859 /* First, free up the delay slot. */
6860 if (xdelay && dbr_sequence_length () != 0)
6862 /* We can't handle a jump in the delay slot. */
6863 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6865 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6866 optimize, 0, NULL);
6868 /* Now delete the delay insn. */
6869 SET_INSN_DELETED (NEXT_INSN (insn));
6872 /* Output an insn to save %r1. The runtime documentation doesn't
6873 specify whether the "Clean Up" slot in the callers frame can
6874 be clobbered by the callee. It isn't copied by HP's builtin
6875 alloca, so this suggests that it can be clobbered if necessary.
6876 The "Static Link" location is copied by HP builtin alloca, so
6877 we avoid using it. Using the cleanup slot might be a problem
6878 if we have to interoperate with languages that pass cleanup
6879 information. However, it should be possible to handle these
6880 situations with GCC's asm feature.
6882 The "Current RP" slot is reserved for the called procedure, so
6883 we try to use it when we don't have a frame of our own. It's
6884 rather unlikely that we won't have a frame when we need to emit
6885 a very long branch.
6887 Really the way to go long term is a register scavenger; goto
6888 the target of the jump and find a register which we can use
6889 as a scratch to hold the value in %r1. Then, we wouldn't have
6890 to free up the delay slot or clobber a slot that may be needed
6891 for other purposes. */
6892 if (TARGET_64BIT)
6894 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6895 /* Use the return pointer slot in the frame marker. */
6896 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6897 else
6898 /* Use the slot at -40 in the frame marker since HP builtin
6899 alloca doesn't copy it. */
6900 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6902 else
6904 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6905 /* Use the return pointer slot in the frame marker. */
6906 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6907 else
6908 /* Use the "Clean Up" slot in the frame marker. In GCC,
6909 the only other use of this location is for copying a
6910 floating point double argument from a floating-point
6911 register to two general registers. The copy is done
6912 as an "atomic" operation when outputting a call, so it
6913 won't interfere with our using the location here. */
6914 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6917 if (TARGET_PORTABLE_RUNTIME)
6919 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6920 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6921 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6923 else if (flag_pic)
6925 xoperands[1] = gen_rtx_REG (Pmode, 1);
6926 xoperands[2] = xoperands[1];
6927 pa_output_pic_pcrel_sequence (xoperands);
6928 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6930 else
6931 /* Now output a very long branch to the original target. */
6932 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6934 /* Now restore the value of %r1 in the delay slot. */
6935 if (TARGET_64BIT)
6937 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6938 return "ldd -16(%%r30),%%r1";
6939 else
6940 return "ldd -40(%%r30),%%r1";
6942 else
6944 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6945 return "ldw -20(%%r30),%%r1";
6946 else
6947 return "ldw -12(%%r30),%%r1";
6951 /* This routine handles all the branch-on-bit conditional branch sequences we
6952 might need to generate. It handles nullification of delay slots,
6953 varying length branches, negated branches and all combinations of the
6954 above. it returns the appropriate output template to emit the branch. */
6956 const char *
6957 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6959 static char buf[100];
6960 bool useskip;
6961 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6962 int length = get_attr_length (insn);
6963 int xdelay;
6965 /* A conditional branch to the following instruction (e.g. the delay slot) is
6966 asking for a disaster. I do not think this can happen as this pattern
6967 is only used when optimizing; jump optimization should eliminate the
6968 jump. But be prepared just in case. */
6970 if (branch_to_delay_slot_p (insn))
6971 return "nop";
6973 /* If this is a long branch with its delay slot unfilled, set `nullify'
6974 as it can nullify the delay slot and save a nop. */
6975 if (length == 8 && dbr_sequence_length () == 0)
6976 nullify = 1;
6978 /* If this is a short forward conditional branch which did not get
6979 its delay slot filled, the delay slot can still be nullified. */
6980 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6981 nullify = forward_branch_p (insn);
6983 /* A forward branch over a single nullified insn can be done with a
6984 extrs instruction. This avoids a single cycle penalty due to
6985 mis-predicted branch if we fall through (branch not taken). */
6986 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6988 switch (length)
6991 /* All short conditional branches except backwards with an unfilled
6992 delay slot. */
6993 case 4:
6994 if (useskip)
6995 strcpy (buf, "{extrs,|extrw,s,}");
6996 else
6997 strcpy (buf, "bb,");
6998 if (useskip && GET_MODE (operands[0]) == DImode)
6999 strcpy (buf, "extrd,s,*");
7000 else if (GET_MODE (operands[0]) == DImode)
7001 strcpy (buf, "bb,*");
7002 if ((which == 0 && negated)
7003 || (which == 1 && ! negated))
7004 strcat (buf, ">=");
7005 else
7006 strcat (buf, "<");
7007 if (useskip)
7008 strcat (buf, " %0,%1,1,%%r0");
7009 else if (nullify && negated)
7011 if (branch_needs_nop_p (insn))
7012 strcat (buf, ",n %0,%1,%3%#");
7013 else
7014 strcat (buf, ",n %0,%1,%3");
7016 else if (nullify && ! negated)
7018 if (branch_needs_nop_p (insn))
7019 strcat (buf, ",n %0,%1,%2%#");
7020 else
7021 strcat (buf, ",n %0,%1,%2");
7023 else if (! nullify && negated)
7024 strcat (buf, " %0,%1,%3");
7025 else if (! nullify && ! negated)
7026 strcat (buf, " %0,%1,%2");
7027 break;
7029 /* All long conditionals. Note a short backward branch with an
7030 unfilled delay slot is treated just like a long backward branch
7031 with an unfilled delay slot. */
7032 case 8:
7033 /* Handle weird backwards branch with a filled delay slot
7034 which is nullified. */
7035 if (dbr_sequence_length () != 0
7036 && ! forward_branch_p (insn)
7037 && nullify)
7039 strcpy (buf, "bb,");
7040 if (GET_MODE (operands[0]) == DImode)
7041 strcat (buf, "*");
7042 if ((which == 0 && negated)
7043 || (which == 1 && ! negated))
7044 strcat (buf, "<");
7045 else
7046 strcat (buf, ">=");
7047 if (negated)
7048 strcat (buf, ",n %0,%1,.+12\n\tb %3");
7049 else
7050 strcat (buf, ",n %0,%1,.+12\n\tb %2");
7052 /* Handle short backwards branch with an unfilled delay slot.
7053 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7054 taken and untaken branches. */
7055 else if (dbr_sequence_length () == 0
7056 && ! forward_branch_p (insn)
7057 && INSN_ADDRESSES_SET_P ()
7058 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7059 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7061 strcpy (buf, "bb,");
7062 if (GET_MODE (operands[0]) == DImode)
7063 strcat (buf, "*");
7064 if ((which == 0 && negated)
7065 || (which == 1 && ! negated))
7066 strcat (buf, ">=");
7067 else
7068 strcat (buf, "<");
7069 if (negated)
7070 strcat (buf, " %0,%1,%3%#");
7071 else
7072 strcat (buf, " %0,%1,%2%#");
7074 else
7076 if (GET_MODE (operands[0]) == DImode)
7077 strcpy (buf, "extrd,s,*");
7078 else
7079 strcpy (buf, "{extrs,|extrw,s,}");
7080 if ((which == 0 && negated)
7081 || (which == 1 && ! negated))
7082 strcat (buf, "<");
7083 else
7084 strcat (buf, ">=");
7085 if (nullify && negated)
7086 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7087 else if (nullify && ! negated)
7088 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7089 else if (negated)
7090 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7091 else
7092 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7094 break;
7096 default:
7097 /* The reversed conditional branch must branch over one additional
7098 instruction if the delay slot is filled and needs to be extracted
7099 by pa_output_lbranch. If the delay slot is empty or this is a
7100 nullified forward branch, the instruction after the reversed
7101 condition branch must be nullified. */
7102 if (dbr_sequence_length () == 0
7103 || (nullify && forward_branch_p (insn)))
7105 nullify = 1;
7106 xdelay = 0;
7107 operands[4] = GEN_INT (length);
7109 else
7111 xdelay = 1;
7112 operands[4] = GEN_INT (length + 4);
7115 if (GET_MODE (operands[0]) == DImode)
7116 strcpy (buf, "bb,*");
7117 else
7118 strcpy (buf, "bb,");
7119 if ((which == 0 && negated)
7120 || (which == 1 && !negated))
7121 strcat (buf, "<");
7122 else
7123 strcat (buf, ">=");
7124 if (nullify)
7125 strcat (buf, ",n %0,%1,.+%4");
7126 else
7127 strcat (buf, " %0,%1,.+%4");
7128 output_asm_insn (buf, operands);
7129 return pa_output_lbranch (negated ? operands[3] : operands[2],
7130 insn, xdelay);
7132 return buf;
7135 /* This routine handles all the branch-on-variable-bit conditional branch
7136 sequences we might need to generate. It handles nullification of delay
7137 slots, varying length branches, negated branches and all combinations
7138 of the above. it returns the appropriate output template to emit the
7139 branch. */
7141 const char *
7142 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7143 int which)
7145 static char buf[100];
7146 bool useskip;
7147 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7148 int length = get_attr_length (insn);
7149 int xdelay;
7151 /* A conditional branch to the following instruction (e.g. the delay slot) is
7152 asking for a disaster. I do not think this can happen as this pattern
7153 is only used when optimizing; jump optimization should eliminate the
7154 jump. But be prepared just in case. */
7156 if (branch_to_delay_slot_p (insn))
7157 return "nop";
7159 /* If this is a long branch with its delay slot unfilled, set `nullify'
7160 as it can nullify the delay slot and save a nop. */
7161 if (length == 8 && dbr_sequence_length () == 0)
7162 nullify = 1;
7164 /* If this is a short forward conditional branch which did not get
7165 its delay slot filled, the delay slot can still be nullified. */
7166 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7167 nullify = forward_branch_p (insn);
7169 /* A forward branch over a single nullified insn can be done with a
7170 extrs instruction. This avoids a single cycle penalty due to
7171 mis-predicted branch if we fall through (branch not taken). */
7172 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7174 switch (length)
7177 /* All short conditional branches except backwards with an unfilled
7178 delay slot. */
7179 case 4:
7180 if (useskip)
7181 strcpy (buf, "{vextrs,|extrw,s,}");
7182 else
7183 strcpy (buf, "{bvb,|bb,}");
7184 if (useskip && GET_MODE (operands[0]) == DImode)
7185 strcpy (buf, "extrd,s,*");
7186 else if (GET_MODE (operands[0]) == DImode)
7187 strcpy (buf, "bb,*");
7188 if ((which == 0 && negated)
7189 || (which == 1 && ! negated))
7190 strcat (buf, ">=");
7191 else
7192 strcat (buf, "<");
7193 if (useskip)
7194 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7195 else if (nullify && negated)
7197 if (branch_needs_nop_p (insn))
7198 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7199 else
7200 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7202 else if (nullify && ! negated)
7204 if (branch_needs_nop_p (insn))
7205 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7206 else
7207 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7209 else if (! nullify && negated)
7210 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7211 else if (! nullify && ! negated)
7212 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7213 break;
7215 /* All long conditionals. Note a short backward branch with an
7216 unfilled delay slot is treated just like a long backward branch
7217 with an unfilled delay slot. */
7218 case 8:
7219 /* Handle weird backwards branch with a filled delay slot
7220 which is nullified. */
7221 if (dbr_sequence_length () != 0
7222 && ! forward_branch_p (insn)
7223 && nullify)
7225 strcpy (buf, "{bvb,|bb,}");
7226 if (GET_MODE (operands[0]) == DImode)
7227 strcat (buf, "*");
7228 if ((which == 0 && negated)
7229 || (which == 1 && ! negated))
7230 strcat (buf, "<");
7231 else
7232 strcat (buf, ">=");
7233 if (negated)
7234 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7235 else
7236 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7238 /* Handle short backwards branch with an unfilled delay slot.
7239 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7240 taken and untaken branches. */
7241 else if (dbr_sequence_length () == 0
7242 && ! forward_branch_p (insn)
7243 && INSN_ADDRESSES_SET_P ()
7244 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7245 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7247 strcpy (buf, "{bvb,|bb,}");
7248 if (GET_MODE (operands[0]) == DImode)
7249 strcat (buf, "*");
7250 if ((which == 0 && negated)
7251 || (which == 1 && ! negated))
7252 strcat (buf, ">=");
7253 else
7254 strcat (buf, "<");
7255 if (negated)
7256 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7257 else
7258 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7260 else
7262 strcpy (buf, "{vextrs,|extrw,s,}");
7263 if (GET_MODE (operands[0]) == DImode)
7264 strcpy (buf, "extrd,s,*");
7265 if ((which == 0 && negated)
7266 || (which == 1 && ! negated))
7267 strcat (buf, "<");
7268 else
7269 strcat (buf, ">=");
7270 if (nullify && negated)
7271 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7272 else if (nullify && ! negated)
7273 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7274 else if (negated)
7275 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7276 else
7277 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7279 break;
7281 default:
7282 /* The reversed conditional branch must branch over one additional
7283 instruction if the delay slot is filled and needs to be extracted
7284 by pa_output_lbranch. If the delay slot is empty or this is a
7285 nullified forward branch, the instruction after the reversed
7286 condition branch must be nullified. */
7287 if (dbr_sequence_length () == 0
7288 || (nullify && forward_branch_p (insn)))
7290 nullify = 1;
7291 xdelay = 0;
7292 operands[4] = GEN_INT (length);
7294 else
7296 xdelay = 1;
7297 operands[4] = GEN_INT (length + 4);
7300 if (GET_MODE (operands[0]) == DImode)
7301 strcpy (buf, "bb,*");
7302 else
7303 strcpy (buf, "{bvb,|bb,}");
7304 if ((which == 0 && negated)
7305 || (which == 1 && !negated))
7306 strcat (buf, "<");
7307 else
7308 strcat (buf, ">=");
7309 if (nullify)
7310 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7311 else
7312 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7313 output_asm_insn (buf, operands);
7314 return pa_output_lbranch (negated ? operands[3] : operands[2],
7315 insn, xdelay);
7317 return buf;
7320 /* Return the output template for emitting a dbra type insn.
7322 Note it may perform some output operations on its own before
7323 returning the final output string. */
7324 const char *
7325 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7327 int length = get_attr_length (insn);
7329 /* A conditional branch to the following instruction (e.g. the delay slot) is
7330 asking for a disaster. Be prepared! */
7332 if (branch_to_delay_slot_p (insn))
7334 if (which_alternative == 0)
7335 return "ldo %1(%0),%0";
7336 else if (which_alternative == 1)
7338 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7339 output_asm_insn ("ldw -16(%%r30),%4", operands);
7340 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7341 return "{fldws|fldw} -16(%%r30),%0";
7343 else
7345 output_asm_insn ("ldw %0,%4", operands);
7346 return "ldo %1(%4),%4\n\tstw %4,%0";
7350 if (which_alternative == 0)
7352 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7353 int xdelay;
7355 /* If this is a long branch with its delay slot unfilled, set `nullify'
7356 as it can nullify the delay slot and save a nop. */
7357 if (length == 8 && dbr_sequence_length () == 0)
7358 nullify = 1;
7360 /* If this is a short forward conditional branch which did not get
7361 its delay slot filled, the delay slot can still be nullified. */
7362 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7363 nullify = forward_branch_p (insn);
7365 switch (length)
7367 case 4:
7368 if (nullify)
7370 if (branch_needs_nop_p (insn))
7371 return "addib,%C2,n %1,%0,%3%#";
7372 else
7373 return "addib,%C2,n %1,%0,%3";
7375 else
7376 return "addib,%C2 %1,%0,%3";
7378 case 8:
7379 /* Handle weird backwards branch with a fulled delay slot
7380 which is nullified. */
7381 if (dbr_sequence_length () != 0
7382 && ! forward_branch_p (insn)
7383 && nullify)
7384 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7385 /* Handle short backwards branch with an unfilled delay slot.
7386 Using a addb;nop rather than addi;bl saves 1 cycle for both
7387 taken and untaken branches. */
7388 else if (dbr_sequence_length () == 0
7389 && ! forward_branch_p (insn)
7390 && INSN_ADDRESSES_SET_P ()
7391 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7392 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7393 return "addib,%C2 %1,%0,%3%#";
7395 /* Handle normal cases. */
7396 if (nullify)
7397 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7398 else
7399 return "addi,%N2 %1,%0,%0\n\tb %3";
7401 default:
7402 /* The reversed conditional branch must branch over one additional
7403 instruction if the delay slot is filled and needs to be extracted
7404 by pa_output_lbranch. If the delay slot is empty or this is a
7405 nullified forward branch, the instruction after the reversed
7406 condition branch must be nullified. */
7407 if (dbr_sequence_length () == 0
7408 || (nullify && forward_branch_p (insn)))
7410 nullify = 1;
7411 xdelay = 0;
7412 operands[4] = GEN_INT (length);
7414 else
7416 xdelay = 1;
7417 operands[4] = GEN_INT (length + 4);
7420 if (nullify)
7421 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7422 else
7423 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7425 return pa_output_lbranch (operands[3], insn, xdelay);
7429 /* Deal with gross reload from FP register case. */
7430 else if (which_alternative == 1)
7432 /* Move loop counter from FP register to MEM then into a GR,
7433 increment the GR, store the GR into MEM, and finally reload
7434 the FP register from MEM from within the branch's delay slot. */
7435 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7436 operands);
7437 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7438 if (length == 24)
7439 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7440 else if (length == 28)
7441 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7442 else
7444 operands[5] = GEN_INT (length - 16);
7445 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7446 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7447 return pa_output_lbranch (operands[3], insn, 0);
7450 /* Deal with gross reload from memory case. */
7451 else
7453 /* Reload loop counter from memory, the store back to memory
7454 happens in the branch's delay slot. */
7455 output_asm_insn ("ldw %0,%4", operands);
7456 if (length == 12)
7457 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7458 else if (length == 16)
7459 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7460 else
7462 operands[5] = GEN_INT (length - 4);
7463 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7464 return pa_output_lbranch (operands[3], insn, 0);
7469 /* Return the output template for emitting a movb type insn.
7471 Note it may perform some output operations on its own before
7472 returning the final output string. */
7473 const char *
7474 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7475 int reverse_comparison)
7477 int length = get_attr_length (insn);
7479 /* A conditional branch to the following instruction (e.g. the delay slot) is
7480 asking for a disaster. Be prepared! */
7482 if (branch_to_delay_slot_p (insn))
7484 if (which_alternative == 0)
7485 return "copy %1,%0";
7486 else if (which_alternative == 1)
7488 output_asm_insn ("stw %1,-16(%%r30)", operands);
7489 return "{fldws|fldw} -16(%%r30),%0";
7491 else if (which_alternative == 2)
7492 return "stw %1,%0";
7493 else
7494 return "mtsar %r1";
7497 /* Support the second variant. */
7498 if (reverse_comparison)
7499 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7501 if (which_alternative == 0)
7503 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7504 int xdelay;
7506 /* If this is a long branch with its delay slot unfilled, set `nullify'
7507 as it can nullify the delay slot and save a nop. */
7508 if (length == 8 && dbr_sequence_length () == 0)
7509 nullify = 1;
7511 /* If this is a short forward conditional branch which did not get
7512 its delay slot filled, the delay slot can still be nullified. */
7513 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7514 nullify = forward_branch_p (insn);
7516 switch (length)
7518 case 4:
7519 if (nullify)
7521 if (branch_needs_nop_p (insn))
7522 return "movb,%C2,n %1,%0,%3%#";
7523 else
7524 return "movb,%C2,n %1,%0,%3";
7526 else
7527 return "movb,%C2 %1,%0,%3";
7529 case 8:
7530 /* Handle weird backwards branch with a filled delay slot
7531 which is nullified. */
7532 if (dbr_sequence_length () != 0
7533 && ! forward_branch_p (insn)
7534 && nullify)
7535 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7537 /* Handle short backwards branch with an unfilled delay slot.
7538 Using a movb;nop rather than or;bl saves 1 cycle for both
7539 taken and untaken branches. */
7540 else if (dbr_sequence_length () == 0
7541 && ! forward_branch_p (insn)
7542 && INSN_ADDRESSES_SET_P ()
7543 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7544 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7545 return "movb,%C2 %1,%0,%3%#";
7546 /* Handle normal cases. */
7547 if (nullify)
7548 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7549 else
7550 return "or,%N2 %1,%%r0,%0\n\tb %3";
7552 default:
7553 /* The reversed conditional branch must branch over one additional
7554 instruction if the delay slot is filled and needs to be extracted
7555 by pa_output_lbranch. If the delay slot is empty or this is a
7556 nullified forward branch, the instruction after the reversed
7557 condition branch must be nullified. */
7558 if (dbr_sequence_length () == 0
7559 || (nullify && forward_branch_p (insn)))
7561 nullify = 1;
7562 xdelay = 0;
7563 operands[4] = GEN_INT (length);
7565 else
7567 xdelay = 1;
7568 operands[4] = GEN_INT (length + 4);
7571 if (nullify)
7572 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7573 else
7574 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7576 return pa_output_lbranch (operands[3], insn, xdelay);
7579 /* Deal with gross reload for FP destination register case. */
7580 else if (which_alternative == 1)
7582 /* Move source register to MEM, perform the branch test, then
7583 finally load the FP register from MEM from within the branch's
7584 delay slot. */
7585 output_asm_insn ("stw %1,-16(%%r30)", operands);
7586 if (length == 12)
7587 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7588 else if (length == 16)
7589 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7590 else
7592 operands[4] = GEN_INT (length - 4);
7593 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7594 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7595 return pa_output_lbranch (operands[3], insn, 0);
7598 /* Deal with gross reload from memory case. */
7599 else if (which_alternative == 2)
7601 /* Reload loop counter from memory, the store back to memory
7602 happens in the branch's delay slot. */
7603 if (length == 8)
7604 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7605 else if (length == 12)
7606 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7607 else
7609 operands[4] = GEN_INT (length);
7610 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7611 operands);
7612 return pa_output_lbranch (operands[3], insn, 0);
7615 /* Handle SAR as a destination. */
7616 else
7618 if (length == 8)
7619 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7620 else if (length == 12)
7621 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7622 else
7624 operands[4] = GEN_INT (length);
7625 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7626 operands);
7627 return pa_output_lbranch (operands[3], insn, 0);
7632 /* Copy any FP arguments in INSN into integer registers. */
7633 static void
7634 copy_fp_args (rtx_insn *insn)
7636 rtx link;
7637 rtx xoperands[2];
7639 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7641 int arg_mode, regno;
7642 rtx use = XEXP (link, 0);
7644 if (! (GET_CODE (use) == USE
7645 && GET_CODE (XEXP (use, 0)) == REG
7646 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7647 continue;
7649 arg_mode = GET_MODE (XEXP (use, 0));
7650 regno = REGNO (XEXP (use, 0));
7652 /* Is it a floating point register? */
7653 if (regno >= 32 && regno <= 39)
7655 /* Copy the FP register into an integer register via memory. */
7656 if (arg_mode == SFmode)
7658 xoperands[0] = XEXP (use, 0);
7659 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7660 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7661 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7663 else
7665 xoperands[0] = XEXP (use, 0);
7666 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7667 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7668 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7669 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7675 /* Compute length of the FP argument copy sequence for INSN. */
7676 static int
7677 length_fp_args (rtx_insn *insn)
7679 int length = 0;
7680 rtx link;
7682 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7684 int arg_mode, regno;
7685 rtx use = XEXP (link, 0);
7687 if (! (GET_CODE (use) == USE
7688 && GET_CODE (XEXP (use, 0)) == REG
7689 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7690 continue;
7692 arg_mode = GET_MODE (XEXP (use, 0));
7693 regno = REGNO (XEXP (use, 0));
7695 /* Is it a floating point register? */
7696 if (regno >= 32 && regno <= 39)
7698 if (arg_mode == SFmode)
7699 length += 8;
7700 else
7701 length += 12;
7705 return length;
7708 /* Return the attribute length for the millicode call instruction INSN.
7709 The length must match the code generated by pa_output_millicode_call.
7710 We include the delay slot in the returned length as it is better to
7711 over estimate the length than to under estimate it. */
7714 pa_attr_length_millicode_call (rtx_insn *insn)
7716 unsigned long distance = -1;
7717 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7719 if (INSN_ADDRESSES_SET_P ())
7721 distance = (total + insn_current_reference_address (insn));
7722 if (distance < total)
7723 distance = -1;
7726 if (TARGET_64BIT)
7728 if (!TARGET_LONG_CALLS && distance < 7600000)
7729 return 8;
7731 return 20;
7733 else if (TARGET_PORTABLE_RUNTIME)
7734 return 24;
7735 else
7737 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7738 return 8;
7740 if (!flag_pic)
7741 return 12;
7743 return 24;
7747 /* INSN is a function call.
7749 CALL_DEST is the routine we are calling. */
7751 const char *
7752 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7754 int attr_length = get_attr_length (insn);
7755 int seq_length = dbr_sequence_length ();
7756 rtx xoperands[4];
7758 xoperands[0] = call_dest;
7760 /* Handle the common case where we are sure that the branch will
7761 reach the beginning of the $CODE$ subspace. The within reach
7762 form of the $$sh_func_adrs call has a length of 28. Because it
7763 has an attribute type of sh_func_adrs, it never has a nonzero
7764 sequence length (i.e., the delay slot is never filled). */
7765 if (!TARGET_LONG_CALLS
7766 && (attr_length == 8
7767 || (attr_length == 28
7768 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7770 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7771 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7773 else
7775 if (TARGET_64BIT)
7777 /* It might seem that one insn could be saved by accessing
7778 the millicode function using the linkage table. However,
7779 this doesn't work in shared libraries and other dynamically
7780 loaded objects. Using a pc-relative sequence also avoids
7781 problems related to the implicit use of the gp register. */
7782 xoperands[1] = gen_rtx_REG (Pmode, 1);
7783 xoperands[2] = xoperands[1];
7784 pa_output_pic_pcrel_sequence (xoperands);
7785 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7787 else if (TARGET_PORTABLE_RUNTIME)
7789 /* Pure portable runtime doesn't allow be/ble; we also don't
7790 have PIC support in the assembler/linker, so this sequence
7791 is needed. */
7793 /* Get the address of our target into %r1. */
7794 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7795 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7797 /* Get our return address into %r31. */
7798 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7799 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7801 /* Jump to our target address in %r1. */
7802 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7804 else if (!flag_pic)
7806 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7807 if (TARGET_PA_20)
7808 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7809 else
7810 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7812 else
7814 xoperands[1] = gen_rtx_REG (Pmode, 31);
7815 xoperands[2] = gen_rtx_REG (Pmode, 1);
7816 pa_output_pic_pcrel_sequence (xoperands);
7818 /* Adjust return address. */
7819 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7821 /* Jump to our target address in %r1. */
7822 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7826 if (seq_length == 0)
7827 output_asm_insn ("nop", xoperands);
7829 return "";
7832 /* Return the attribute length of the call instruction INSN. The SIBCALL
7833 flag indicates whether INSN is a regular call or a sibling call. The
7834 length returned must be longer than the code actually generated by
7835 pa_output_call. Since branch shortening is done before delay branch
7836 sequencing, there is no way to determine whether or not the delay
7837 slot will be filled during branch shortening. Even when the delay
7838 slot is filled, we may have to add a nop if the delay slot contains
7839 a branch that can't reach its target. Thus, we always have to include
7840 the delay slot in the length estimate. This used to be done in
7841 pa_adjust_insn_length but we do it here now as some sequences always
7842 fill the delay slot and we can save four bytes in the estimate for
7843 these sequences. */
7846 pa_attr_length_call (rtx_insn *insn, int sibcall)
7848 int local_call;
7849 rtx call, call_dest;
7850 tree call_decl;
7851 int length = 0;
7852 rtx pat = PATTERN (insn);
7853 unsigned long distance = -1;
7855 gcc_assert (CALL_P (insn));
7857 if (INSN_ADDRESSES_SET_P ())
7859 unsigned long total;
7861 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7862 distance = (total + insn_current_reference_address (insn));
7863 if (distance < total)
7864 distance = -1;
7867 gcc_assert (GET_CODE (pat) == PARALLEL);
7869 /* Get the call rtx. */
7870 call = XVECEXP (pat, 0, 0);
7871 if (GET_CODE (call) == SET)
7872 call = SET_SRC (call);
7874 gcc_assert (GET_CODE (call) == CALL);
7876 /* Determine if this is a local call. */
7877 call_dest = XEXP (XEXP (call, 0), 0);
7878 call_decl = SYMBOL_REF_DECL (call_dest);
7879 local_call = call_decl && targetm.binds_local_p (call_decl);
7881 /* pc-relative branch. */
7882 if (!TARGET_LONG_CALLS
7883 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7884 || distance < MAX_PCREL17F_OFFSET))
7885 length += 8;
7887 /* 64-bit plabel sequence. */
7888 else if (TARGET_64BIT && !local_call)
7889 length += sibcall ? 28 : 24;
7891 /* non-pic long absolute branch sequence. */
7892 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7893 length += 12;
7895 /* long pc-relative branch sequence. */
7896 else if (TARGET_LONG_PIC_SDIFF_CALL
7897 || (TARGET_GAS && !TARGET_SOM && local_call))
7899 length += 20;
7901 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7902 length += 8;
7905 /* 32-bit plabel sequence. */
7906 else
7908 length += 32;
7910 if (TARGET_SOM)
7911 length += length_fp_args (insn);
7913 if (flag_pic)
7914 length += 4;
7916 if (!TARGET_PA_20)
7918 if (!sibcall)
7919 length += 8;
7921 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7922 length += 8;
7926 return length;
7929 /* INSN is a function call.
7931 CALL_DEST is the routine we are calling. */
7933 const char *
7934 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7936 int seq_length = dbr_sequence_length ();
7937 tree call_decl = SYMBOL_REF_DECL (call_dest);
7938 int local_call = call_decl && targetm.binds_local_p (call_decl);
7939 rtx xoperands[4];
7941 xoperands[0] = call_dest;
7943 /* Handle the common case where we're sure that the branch will reach
7944 the beginning of the "$CODE$" subspace. This is the beginning of
7945 the current function if we are in a named section. */
7946 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7948 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7949 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7951 else
7953 if (TARGET_64BIT && !local_call)
7955 /* ??? As far as I can tell, the HP linker doesn't support the
7956 long pc-relative sequence described in the 64-bit runtime
7957 architecture. So, we use a slightly longer indirect call. */
7958 xoperands[0] = pa_get_deferred_plabel (call_dest);
7959 xoperands[1] = gen_label_rtx ();
7961 /* If this isn't a sibcall, we put the load of %r27 into the
7962 delay slot. We can't do this in a sibcall as we don't
7963 have a second call-clobbered scratch register available.
7964 We don't need to do anything when generating fast indirect
7965 calls. */
7966 if (seq_length != 0 && !sibcall)
7968 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7969 optimize, 0, NULL);
7971 /* Now delete the delay insn. */
7972 SET_INSN_DELETED (NEXT_INSN (insn));
7973 seq_length = 0;
7976 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7977 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7978 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7980 if (sibcall)
7982 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7983 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7984 output_asm_insn ("bve (%%r1)", xoperands);
7986 else
7988 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7989 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7990 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7991 seq_length = 1;
7994 else
7996 int indirect_call = 0;
7998 /* Emit a long call. There are several different sequences
7999 of increasing length and complexity. In most cases,
8000 they don't allow an instruction in the delay slot. */
8001 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8002 && !TARGET_LONG_PIC_SDIFF_CALL
8003 && !(TARGET_GAS && !TARGET_SOM && local_call)
8004 && !TARGET_64BIT)
8005 indirect_call = 1;
8007 if (seq_length != 0
8008 && !sibcall
8009 && (!TARGET_PA_20
8010 || indirect_call
8011 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
8013 /* A non-jump insn in the delay slot. By definition we can
8014 emit this insn before the call (and in fact before argument
8015 relocating. */
8016 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
8017 NULL);
8019 /* Now delete the delay insn. */
8020 SET_INSN_DELETED (NEXT_INSN (insn));
8021 seq_length = 0;
8024 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8026 /* This is the best sequence for making long calls in
8027 non-pic code. Unfortunately, GNU ld doesn't provide
8028 the stub needed for external calls, and GAS's support
8029 for this with the SOM linker is buggy. It is safe
8030 to use this for local calls. */
8031 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8032 if (sibcall)
8033 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
8034 else
8036 if (TARGET_PA_20)
8037 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
8038 xoperands);
8039 else
8040 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
8042 output_asm_insn ("copy %%r31,%%r2", xoperands);
8043 seq_length = 1;
8046 else
8048 /* The HP assembler and linker can handle relocations for
8049 the difference of two symbols. The HP assembler
8050 recognizes the sequence as a pc-relative call and
8051 the linker provides stubs when needed. */
8053 /* GAS currently can't generate the relocations that
8054 are needed for the SOM linker under HP-UX using this
8055 sequence. The GNU linker doesn't generate the stubs
8056 that are needed for external calls on TARGET_ELF32
8057 with this sequence. For now, we have to use a longer
8058 plabel sequence when using GAS for non local calls. */
8059 if (TARGET_LONG_PIC_SDIFF_CALL
8060 || (TARGET_GAS && !TARGET_SOM && local_call))
8062 xoperands[1] = gen_rtx_REG (Pmode, 1);
8063 xoperands[2] = xoperands[1];
8064 pa_output_pic_pcrel_sequence (xoperands);
8066 else
8068 /* Emit a long plabel-based call sequence. This is
8069 essentially an inline implementation of $$dyncall.
8070 We don't actually try to call $$dyncall as this is
8071 as difficult as calling the function itself. */
8072 xoperands[0] = pa_get_deferred_plabel (call_dest);
8073 xoperands[1] = gen_label_rtx ();
8075 /* Since the call is indirect, FP arguments in registers
8076 need to be copied to the general registers. Then, the
8077 argument relocation stub will copy them back. */
8078 if (TARGET_SOM)
8079 copy_fp_args (insn);
8081 if (flag_pic)
8083 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8084 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8085 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8087 else
8089 output_asm_insn ("addil LR'%0-$global$,%%r27",
8090 xoperands);
8091 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8092 xoperands);
8095 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8096 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8097 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8098 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8100 if (!sibcall && !TARGET_PA_20)
8102 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8103 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8104 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8105 else
8106 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8110 if (TARGET_PA_20)
8112 if (sibcall)
8113 output_asm_insn ("bve (%%r1)", xoperands);
8114 else
8116 if (indirect_call)
8118 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8119 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8120 seq_length = 1;
8122 else
8123 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8126 else
8128 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8129 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8130 xoperands);
8132 if (sibcall)
8134 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8135 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8136 else
8137 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8139 else
8141 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8142 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8143 else
8144 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8146 if (indirect_call)
8147 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8148 else
8149 output_asm_insn ("copy %%r31,%%r2", xoperands);
8150 seq_length = 1;
8157 if (seq_length == 0)
8158 output_asm_insn ("nop", xoperands);
8160 return "";
8163 /* Return the attribute length of the indirect call instruction INSN.
8164 The length must match the code generated by output_indirect call.
8165 The returned length includes the delay slot. Currently, the delay
8166 slot of an indirect call sequence is not exposed and it is used by
8167 the sequence itself. */
8170 pa_attr_length_indirect_call (rtx_insn *insn)
8172 unsigned long distance = -1;
8173 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8175 if (INSN_ADDRESSES_SET_P ())
8177 distance = (total + insn_current_reference_address (insn));
8178 if (distance < total)
8179 distance = -1;
8182 if (TARGET_64BIT)
8183 return 12;
8185 if (TARGET_FAST_INDIRECT_CALLS)
8186 return 8;
8188 if (TARGET_PORTABLE_RUNTIME)
8189 return 16;
8191 /* Inline version of $$dyncall. */
8192 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8193 return 20;
8195 if (!TARGET_LONG_CALLS
8196 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8197 || distance < MAX_PCREL17F_OFFSET))
8198 return 8;
8200 /* Out of reach, can use ble. */
8201 if (!flag_pic)
8202 return 12;
8204 /* Inline version of $$dyncall. */
8205 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8206 return 20;
8208 if (!optimize_size)
8209 return 36;
8211 /* Long PIC pc-relative call. */
8212 return 20;
8215 const char *
8216 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8218 rtx xoperands[4];
8219 int length;
8221 if (TARGET_64BIT)
8223 xoperands[0] = call_dest;
8224 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8225 "bve,l (%%r2),%%r2\n\t"
8226 "ldd 24(%0),%%r27", xoperands);
8227 return "";
8230 /* First the special case for kernels, level 0 systems, etc. */
8231 if (TARGET_FAST_INDIRECT_CALLS)
8233 pa_output_arg_descriptor (insn);
8234 if (TARGET_PA_20)
8235 return "bve,l,n (%%r22),%%r2\n\tnop";
8236 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8239 if (TARGET_PORTABLE_RUNTIME)
8241 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8242 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8243 pa_output_arg_descriptor (insn);
8244 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8247 /* Maybe emit a fast inline version of $$dyncall. */
8248 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8250 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8251 "ldw 2(%%r22),%%r19\n\t"
8252 "ldw -2(%%r22),%%r22", xoperands);
8253 pa_output_arg_descriptor (insn);
8254 if (TARGET_NO_SPACE_REGS)
8256 if (TARGET_PA_20)
8257 return "bve,l,n (%%r22),%%r2\n\tnop";
8258 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8260 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8263 /* Now the normal case -- we can reach $$dyncall directly or
8264 we're sure that we can get there via a long-branch stub.
8266 No need to check target flags as the length uniquely identifies
8267 the remaining cases. */
8268 length = pa_attr_length_indirect_call (insn);
8269 if (length == 8)
8271 pa_output_arg_descriptor (insn);
8273 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8274 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8275 variant of the B,L instruction can't be used on the SOM target. */
8276 if (TARGET_PA_20 && !TARGET_SOM)
8277 return "b,l,n $$dyncall,%%r2\n\tnop";
8278 else
8279 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8282 /* Long millicode call, but we are not generating PIC or portable runtime
8283 code. */
8284 if (length == 12)
8286 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8287 pa_output_arg_descriptor (insn);
8288 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8291 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8292 pc-relative call sequence is five instructions. The inline PA 2.0
8293 version of $$dyncall is also five instructions. The PA 1.X versions
8294 are longer but still an overall win. */
8295 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8297 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8298 "ldw 2(%%r22),%%r19\n\t"
8299 "ldw -2(%%r22),%%r22", xoperands);
8300 if (TARGET_NO_SPACE_REGS)
8302 pa_output_arg_descriptor (insn);
8303 if (TARGET_PA_20)
8304 return "bve,l,n (%%r22),%%r2\n\tnop";
8305 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8307 if (TARGET_PA_20)
8309 pa_output_arg_descriptor (insn);
8310 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8312 output_asm_insn ("bl .+8,%%r2\n\t"
8313 "ldo 16(%%r2),%%r2\n\t"
8314 "ldsid (%%r22),%%r1\n\t"
8315 "mtsp %%r1,%%sr0", xoperands);
8316 pa_output_arg_descriptor (insn);
8317 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8320 /* We need a long PIC call to $$dyncall. */
8321 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8322 xoperands[1] = gen_rtx_REG (Pmode, 2);
8323 xoperands[2] = gen_rtx_REG (Pmode, 1);
8324 pa_output_pic_pcrel_sequence (xoperands);
8325 pa_output_arg_descriptor (insn);
8326 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8329 /* In HPUX 8.0's shared library scheme, special relocations are needed
8330 for function labels if they might be passed to a function
8331 in a shared library (because shared libraries don't live in code
8332 space), and special magic is needed to construct their address. */
8334 void
8335 pa_encode_label (rtx sym)
8337 const char *str = XSTR (sym, 0);
8338 int len = strlen (str) + 1;
8339 char *newstr, *p;
8341 p = newstr = XALLOCAVEC (char, len + 1);
8342 *p++ = '@';
8343 strcpy (p, str);
8345 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8348 static void
8349 pa_encode_section_info (tree decl, rtx rtl, int first)
8351 int old_referenced = 0;
8353 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8354 old_referenced
8355 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8357 default_encode_section_info (decl, rtl, first);
8359 if (first && TEXT_SPACE_P (decl))
8361 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8362 if (TREE_CODE (decl) == FUNCTION_DECL)
8363 pa_encode_label (XEXP (rtl, 0));
8365 else if (old_referenced)
8366 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8369 /* This is sort of inverse to pa_encode_section_info. */
8371 static const char *
8372 pa_strip_name_encoding (const char *str)
8374 str += (*str == '@');
8375 str += (*str == '*');
8376 return str;
8379 /* Returns 1 if OP is a function label involved in a simple addition
8380 with a constant. Used to keep certain patterns from matching
8381 during instruction combination. */
8383 pa_is_function_label_plus_const (rtx op)
8385 /* Strip off any CONST. */
8386 if (GET_CODE (op) == CONST)
8387 op = XEXP (op, 0);
8389 return (GET_CODE (op) == PLUS
8390 && function_label_operand (XEXP (op, 0), VOIDmode)
8391 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8394 /* Output assembly code for a thunk to FUNCTION. */
8396 static void
8397 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8398 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8399 tree function)
8401 static unsigned int current_thunk_number;
8402 int val_14 = VAL_14_BITS_P (delta);
8403 unsigned int old_last_address = last_address, nbytes = 0;
8404 char label[17];
8405 rtx xoperands[4];
8407 xoperands[0] = XEXP (DECL_RTL (function), 0);
8408 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8409 xoperands[2] = GEN_INT (delta);
8411 final_start_function (emit_barrier (), file, 1);
8413 /* Output the thunk. We know that the function is in the same
8414 translation unit (i.e., the same space) as the thunk, and that
8415 thunks are output after their method. Thus, we don't need an
8416 external branch to reach the function. With SOM and GAS,
8417 functions and thunks are effectively in different sections.
8418 Thus, we can always use a IA-relative branch and the linker
8419 will add a long branch stub if necessary.
8421 However, we have to be careful when generating PIC code on the
8422 SOM port to ensure that the sequence does not transfer to an
8423 import stub for the target function as this could clobber the
8424 return value saved at SP-24. This would also apply to the
8425 32-bit linux port if the multi-space model is implemented. */
8426 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8427 && !(flag_pic && TREE_PUBLIC (function))
8428 && (TARGET_GAS || last_address < 262132))
8429 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8430 && ((targetm_common.have_named_sections
8431 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8432 /* The GNU 64-bit linker has rather poor stub management.
8433 So, we use a long branch from thunks that aren't in
8434 the same section as the target function. */
8435 && ((!TARGET_64BIT
8436 && (DECL_SECTION_NAME (thunk_fndecl)
8437 != DECL_SECTION_NAME (function)))
8438 || ((DECL_SECTION_NAME (thunk_fndecl)
8439 == DECL_SECTION_NAME (function))
8440 && last_address < 262132)))
8441 /* In this case, we need to be able to reach the start of
8442 the stub table even though the function is likely closer
8443 and can be jumped to directly. */
8444 || (targetm_common.have_named_sections
8445 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8446 && DECL_SECTION_NAME (function) == NULL
8447 && total_code_bytes < MAX_PCREL17F_OFFSET)
8448 /* Likewise. */
8449 || (!targetm_common.have_named_sections
8450 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8452 if (!val_14)
8453 output_asm_insn ("addil L'%2,%%r26", xoperands);
8455 output_asm_insn ("b %0", xoperands);
8457 if (val_14)
8459 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8460 nbytes += 8;
8462 else
8464 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8465 nbytes += 12;
8468 else if (TARGET_64BIT)
8470 rtx xop[4];
8472 /* We only have one call-clobbered scratch register, so we can't
8473 make use of the delay slot if delta doesn't fit in 14 bits. */
8474 if (!val_14)
8476 output_asm_insn ("addil L'%2,%%r26", xoperands);
8477 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8480 /* Load function address into %r1. */
8481 xop[0] = xoperands[0];
8482 xop[1] = gen_rtx_REG (Pmode, 1);
8483 xop[2] = xop[1];
8484 pa_output_pic_pcrel_sequence (xop);
8486 if (val_14)
8488 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8489 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8490 nbytes += 20;
8492 else
8494 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8495 nbytes += 24;
8498 else if (TARGET_PORTABLE_RUNTIME)
8500 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8501 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8503 if (!val_14)
8504 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8506 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8508 if (val_14)
8510 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8511 nbytes += 16;
8513 else
8515 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8516 nbytes += 20;
8519 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8521 /* The function is accessible from outside this module. The only
8522 way to avoid an import stub between the thunk and function is to
8523 call the function directly with an indirect sequence similar to
8524 that used by $$dyncall. This is possible because $$dyncall acts
8525 as the import stub in an indirect call. */
8526 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8527 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8528 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8529 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8530 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8531 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8532 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8533 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8534 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8536 if (!val_14)
8538 output_asm_insn ("addil L'%2,%%r26", xoperands);
8539 nbytes += 4;
8542 if (TARGET_PA_20)
8544 output_asm_insn ("bve (%%r22)", xoperands);
8545 nbytes += 36;
8547 else if (TARGET_NO_SPACE_REGS)
8549 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8550 nbytes += 36;
8552 else
8554 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8555 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8556 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8557 nbytes += 44;
8560 if (val_14)
8561 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8562 else
8563 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8565 else if (flag_pic)
8567 rtx xop[4];
8569 /* Load function address into %r22. */
8570 xop[0] = xoperands[0];
8571 xop[1] = gen_rtx_REG (Pmode, 1);
8572 xop[2] = gen_rtx_REG (Pmode, 22);
8573 pa_output_pic_pcrel_sequence (xop);
8575 if (!val_14)
8576 output_asm_insn ("addil L'%2,%%r26", xoperands);
8578 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8580 if (val_14)
8582 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8583 nbytes += 20;
8585 else
8587 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8588 nbytes += 24;
8591 else
8593 if (!val_14)
8594 output_asm_insn ("addil L'%2,%%r26", xoperands);
8596 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8597 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8599 if (val_14)
8601 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8602 nbytes += 12;
8604 else
8606 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8607 nbytes += 16;
8611 final_end_function ();
8613 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8615 switch_to_section (data_section);
8616 output_asm_insn (".align 4", xoperands);
8617 ASM_OUTPUT_LABEL (file, label);
8618 output_asm_insn (".word P'%0", xoperands);
8621 current_thunk_number++;
8622 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8623 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8624 last_address += nbytes;
8625 if (old_last_address > last_address)
8626 last_address = UINT_MAX;
8627 update_total_code_bytes (nbytes);
8630 /* Only direct calls to static functions are allowed to be sibling (tail)
8631 call optimized.
8633 This restriction is necessary because some linker generated stubs will
8634 store return pointers into rp' in some cases which might clobber a
8635 live value already in rp'.
8637 In a sibcall the current function and the target function share stack
8638 space. Thus if the path to the current function and the path to the
8639 target function save a value in rp', they save the value into the
8640 same stack slot, which has undesirable consequences.
8642 Because of the deferred binding nature of shared libraries any function
8643 with external scope could be in a different load module and thus require
8644 rp' to be saved when calling that function. So sibcall optimizations
8645 can only be safe for static function.
8647 Note that GCC never needs return value relocations, so we don't have to
8648 worry about static calls with return value relocations (which require
8649 saving rp').
8651 It is safe to perform a sibcall optimization when the target function
8652 will never return. */
8653 static bool
8654 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8656 if (TARGET_PORTABLE_RUNTIME)
8657 return false;
8659 /* Sibcalls are not ok because the arg pointer register is not a fixed
8660 register. This prevents the sibcall optimization from occurring. In
8661 addition, there are problems with stub placement using GNU ld. This
8662 is because a normal sibcall branch uses a 17-bit relocation while
8663 a regular call branch uses a 22-bit relocation. As a result, more
8664 care needs to be taken in the placement of long-branch stubs. */
8665 if (TARGET_64BIT)
8666 return false;
8668 /* Sibcalls are only ok within a translation unit. */
8669 return (decl && !TREE_PUBLIC (decl));
8672 /* ??? Addition is not commutative on the PA due to the weird implicit
8673 space register selection rules for memory addresses. Therefore, we
8674 don't consider a + b == b + a, as this might be inside a MEM. */
8675 static bool
8676 pa_commutative_p (const_rtx x, int outer_code)
8678 return (COMMUTATIVE_P (x)
8679 && (TARGET_NO_SPACE_REGS
8680 || (outer_code != UNKNOWN && outer_code != MEM)
8681 || GET_CODE (x) != PLUS));
8684 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8685 use in fmpyadd instructions. */
8687 pa_fmpyaddoperands (rtx *operands)
8689 machine_mode mode = GET_MODE (operands[0]);
8691 /* Must be a floating point mode. */
8692 if (mode != SFmode && mode != DFmode)
8693 return 0;
8695 /* All modes must be the same. */
8696 if (! (mode == GET_MODE (operands[1])
8697 && mode == GET_MODE (operands[2])
8698 && mode == GET_MODE (operands[3])
8699 && mode == GET_MODE (operands[4])
8700 && mode == GET_MODE (operands[5])))
8701 return 0;
8703 /* All operands must be registers. */
8704 if (! (GET_CODE (operands[1]) == REG
8705 && GET_CODE (operands[2]) == REG
8706 && GET_CODE (operands[3]) == REG
8707 && GET_CODE (operands[4]) == REG
8708 && GET_CODE (operands[5]) == REG))
8709 return 0;
8711 /* Only 2 real operands to the addition. One of the input operands must
8712 be the same as the output operand. */
8713 if (! rtx_equal_p (operands[3], operands[4])
8714 && ! rtx_equal_p (operands[3], operands[5]))
8715 return 0;
8717 /* Inout operand of add cannot conflict with any operands from multiply. */
8718 if (rtx_equal_p (operands[3], operands[0])
8719 || rtx_equal_p (operands[3], operands[1])
8720 || rtx_equal_p (operands[3], operands[2]))
8721 return 0;
8723 /* multiply cannot feed into addition operands. */
8724 if (rtx_equal_p (operands[4], operands[0])
8725 || rtx_equal_p (operands[5], operands[0]))
8726 return 0;
8728 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8729 if (mode == SFmode
8730 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8731 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8732 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8733 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8734 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8735 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8736 return 0;
8738 /* Passed. Operands are suitable for fmpyadd. */
8739 return 1;
8742 #if !defined(USE_COLLECT2)
8743 static void
8744 pa_asm_out_constructor (rtx symbol, int priority)
8746 if (!function_label_operand (symbol, VOIDmode))
8747 pa_encode_label (symbol);
8749 #ifdef CTORS_SECTION_ASM_OP
8750 default_ctor_section_asm_out_constructor (symbol, priority);
8751 #else
8752 # ifdef TARGET_ASM_NAMED_SECTION
8753 default_named_section_asm_out_constructor (symbol, priority);
8754 # else
8755 default_stabs_asm_out_constructor (symbol, priority);
8756 # endif
8757 #endif
8760 static void
8761 pa_asm_out_destructor (rtx symbol, int priority)
8763 if (!function_label_operand (symbol, VOIDmode))
8764 pa_encode_label (symbol);
8766 #ifdef DTORS_SECTION_ASM_OP
8767 default_dtor_section_asm_out_destructor (symbol, priority);
8768 #else
8769 # ifdef TARGET_ASM_NAMED_SECTION
8770 default_named_section_asm_out_destructor (symbol, priority);
8771 # else
8772 default_stabs_asm_out_destructor (symbol, priority);
8773 # endif
8774 #endif
8776 #endif
8778 /* This function places uninitialized global data in the bss section.
8779 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8780 function on the SOM port to prevent uninitialized global data from
8781 being placed in the data section. */
8783 void
8784 pa_asm_output_aligned_bss (FILE *stream,
8785 const char *name,
8786 unsigned HOST_WIDE_INT size,
8787 unsigned int align)
8789 switch_to_section (bss_section);
8790 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8792 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8793 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8794 #endif
8796 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8797 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8798 #endif
8800 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8801 ASM_OUTPUT_LABEL (stream, name);
8802 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8805 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8806 that doesn't allow the alignment of global common storage to be directly
8807 specified. The SOM linker aligns common storage based on the rounded
8808 value of the NUM_BYTES parameter in the .comm directive. It's not
8809 possible to use the .align directive as it doesn't affect the alignment
8810 of the label associated with a .comm directive. */
8812 void
8813 pa_asm_output_aligned_common (FILE *stream,
8814 const char *name,
8815 unsigned HOST_WIDE_INT size,
8816 unsigned int align)
8818 unsigned int max_common_align;
8820 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8821 if (align > max_common_align)
8823 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8824 "for global common data. Using %u",
8825 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8826 align = max_common_align;
8829 switch_to_section (bss_section);
8831 assemble_name (stream, name);
8832 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8833 MAX (size, align / BITS_PER_UNIT));
8836 /* We can't use .comm for local common storage as the SOM linker effectively
8837 treats the symbol as universal and uses the same storage for local symbols
8838 with the same name in different object files. The .block directive
8839 reserves an uninitialized block of storage. However, it's not common
8840 storage. Fortunately, GCC never requests common storage with the same
8841 name in any given translation unit. */
8843 void
8844 pa_asm_output_aligned_local (FILE *stream,
8845 const char *name,
8846 unsigned HOST_WIDE_INT size,
8847 unsigned int align)
8849 switch_to_section (bss_section);
8850 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8852 #ifdef LOCAL_ASM_OP
8853 fprintf (stream, "%s", LOCAL_ASM_OP);
8854 assemble_name (stream, name);
8855 fprintf (stream, "\n");
8856 #endif
8858 ASM_OUTPUT_LABEL (stream, name);
8859 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8862 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8863 use in fmpysub instructions. */
8865 pa_fmpysuboperands (rtx *operands)
8867 machine_mode mode = GET_MODE (operands[0]);
8869 /* Must be a floating point mode. */
8870 if (mode != SFmode && mode != DFmode)
8871 return 0;
8873 /* All modes must be the same. */
8874 if (! (mode == GET_MODE (operands[1])
8875 && mode == GET_MODE (operands[2])
8876 && mode == GET_MODE (operands[3])
8877 && mode == GET_MODE (operands[4])
8878 && mode == GET_MODE (operands[5])))
8879 return 0;
8881 /* All operands must be registers. */
8882 if (! (GET_CODE (operands[1]) == REG
8883 && GET_CODE (operands[2]) == REG
8884 && GET_CODE (operands[3]) == REG
8885 && GET_CODE (operands[4]) == REG
8886 && GET_CODE (operands[5]) == REG))
8887 return 0;
8889 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8890 operation, so operands[4] must be the same as operand[3]. */
8891 if (! rtx_equal_p (operands[3], operands[4]))
8892 return 0;
8894 /* multiply cannot feed into subtraction. */
8895 if (rtx_equal_p (operands[5], operands[0]))
8896 return 0;
8898 /* Inout operand of sub cannot conflict with any operands from multiply. */
8899 if (rtx_equal_p (operands[3], operands[0])
8900 || rtx_equal_p (operands[3], operands[1])
8901 || rtx_equal_p (operands[3], operands[2]))
8902 return 0;
8904 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8905 if (mode == SFmode
8906 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8907 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8908 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8909 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8910 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8911 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8912 return 0;
8914 /* Passed. Operands are suitable for fmpysub. */
8915 return 1;
8918 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8919 constants for a MULT embedded inside a memory address. */
8921 pa_mem_shadd_constant_p (int val)
8923 if (val == 2 || val == 4 || val == 8)
8924 return 1;
8925 else
8926 return 0;
8929 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8930 constants for shadd instructions. */
8932 pa_shadd_constant_p (int val)
8934 if (val == 1 || val == 2 || val == 3)
8935 return 1;
8936 else
8937 return 0;
8940 /* Return TRUE if INSN branches forward. */
8942 static bool
8943 forward_branch_p (rtx_insn *insn)
8945 rtx lab = JUMP_LABEL (insn);
8947 /* The INSN must have a jump label. */
8948 gcc_assert (lab != NULL_RTX);
8950 if (INSN_ADDRESSES_SET_P ())
8951 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8953 while (insn)
8955 if (insn == lab)
8956 return true;
8957 else
8958 insn = NEXT_INSN (insn);
8961 return false;
8964 /* Output an unconditional move and branch insn. */
8966 const char *
8967 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8969 int length = get_attr_length (insn);
8971 /* These are the cases in which we win. */
8972 if (length == 4)
8973 return "mov%I1b,tr %1,%0,%2";
8975 /* None of the following cases win, but they don't lose either. */
8976 if (length == 8)
8978 if (dbr_sequence_length () == 0)
8980 /* Nothing in the delay slot, fake it by putting the combined
8981 insn (the copy or add) in the delay slot of a bl. */
8982 if (GET_CODE (operands[1]) == CONST_INT)
8983 return "b %2\n\tldi %1,%0";
8984 else
8985 return "b %2\n\tcopy %1,%0";
8987 else
8989 /* Something in the delay slot, but we've got a long branch. */
8990 if (GET_CODE (operands[1]) == CONST_INT)
8991 return "ldi %1,%0\n\tb %2";
8992 else
8993 return "copy %1,%0\n\tb %2";
8997 if (GET_CODE (operands[1]) == CONST_INT)
8998 output_asm_insn ("ldi %1,%0", operands);
8999 else
9000 output_asm_insn ("copy %1,%0", operands);
9001 return pa_output_lbranch (operands[2], insn, 1);
9004 /* Output an unconditional add and branch insn. */
9006 const char *
9007 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
9009 int length = get_attr_length (insn);
9011 /* To make life easy we want operand0 to be the shared input/output
9012 operand and operand1 to be the readonly operand. */
9013 if (operands[0] == operands[1])
9014 operands[1] = operands[2];
9016 /* These are the cases in which we win. */
9017 if (length == 4)
9018 return "add%I1b,tr %1,%0,%3";
9020 /* None of the following cases win, but they don't lose either. */
9021 if (length == 8)
9023 if (dbr_sequence_length () == 0)
9024 /* Nothing in the delay slot, fake it by putting the combined
9025 insn (the copy or add) in the delay slot of a bl. */
9026 return "b %3\n\tadd%I1 %1,%0,%0";
9027 else
9028 /* Something in the delay slot, but we've got a long branch. */
9029 return "add%I1 %1,%0,%0\n\tb %3";
9032 output_asm_insn ("add%I1 %1,%0,%0", operands);
9033 return pa_output_lbranch (operands[3], insn, 1);
9036 /* We use this hook to perform a PA specific optimization which is difficult
9037 to do in earlier passes. */
9039 static void
9040 pa_reorg (void)
9042 remove_useless_addtr_insns (1);
9044 if (pa_cpu < PROCESSOR_8000)
9045 pa_combine_instructions ();
9048 /* The PA has a number of odd instructions which can perform multiple
9049 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9050 it may be profitable to combine two instructions into one instruction
9051 with two outputs. It's not profitable PA2.0 machines because the
9052 two outputs would take two slots in the reorder buffers.
9054 This routine finds instructions which can be combined and combines
9055 them. We only support some of the potential combinations, and we
9056 only try common ways to find suitable instructions.
9058 * addb can add two registers or a register and a small integer
9059 and jump to a nearby (+-8k) location. Normally the jump to the
9060 nearby location is conditional on the result of the add, but by
9061 using the "true" condition we can make the jump unconditional.
9062 Thus addb can perform two independent operations in one insn.
9064 * movb is similar to addb in that it can perform a reg->reg
9065 or small immediate->reg copy and jump to a nearby (+-8k location).
9067 * fmpyadd and fmpysub can perform a FP multiply and either an
9068 FP add or FP sub if the operands of the multiply and add/sub are
9069 independent (there are other minor restrictions). Note both
9070 the fmpy and fadd/fsub can in theory move to better spots according
9071 to data dependencies, but for now we require the fmpy stay at a
9072 fixed location.
9074 * Many of the memory operations can perform pre & post updates
9075 of index registers. GCC's pre/post increment/decrement addressing
9076 is far too simple to take advantage of all the possibilities. This
9077 pass may not be suitable since those insns may not be independent.
9079 * comclr can compare two ints or an int and a register, nullify
9080 the following instruction and zero some other register. This
9081 is more difficult to use as it's harder to find an insn which
9082 will generate a comclr than finding something like an unconditional
9083 branch. (conditional moves & long branches create comclr insns).
9085 * Most arithmetic operations can conditionally skip the next
9086 instruction. They can be viewed as "perform this operation
9087 and conditionally jump to this nearby location" (where nearby
9088 is an insns away). These are difficult to use due to the
9089 branch length restrictions. */
9091 static void
9092 pa_combine_instructions (void)
9094 rtx_insn *anchor;
9096 /* This can get expensive since the basic algorithm is on the
9097 order of O(n^2) (or worse). Only do it for -O2 or higher
9098 levels of optimization. */
9099 if (optimize < 2)
9100 return;
9102 /* Walk down the list of insns looking for "anchor" insns which
9103 may be combined with "floating" insns. As the name implies,
9104 "anchor" instructions don't move, while "floating" insns may
9105 move around. */
9106 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9107 rtx_insn *new_rtx = make_insn_raw (par);
9109 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9111 enum attr_pa_combine_type anchor_attr;
9112 enum attr_pa_combine_type floater_attr;
9114 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9115 Also ignore any special USE insns. */
9116 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9117 || GET_CODE (PATTERN (anchor)) == USE
9118 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9119 continue;
9121 anchor_attr = get_attr_pa_combine_type (anchor);
9122 /* See if anchor is an insn suitable for combination. */
9123 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9124 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9125 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9126 && ! forward_branch_p (anchor)))
9128 rtx_insn *floater;
9130 for (floater = PREV_INSN (anchor);
9131 floater;
9132 floater = PREV_INSN (floater))
9134 if (NOTE_P (floater)
9135 || (NONJUMP_INSN_P (floater)
9136 && (GET_CODE (PATTERN (floater)) == USE
9137 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9138 continue;
9140 /* Anything except a regular INSN will stop our search. */
9141 if (! NONJUMP_INSN_P (floater))
9143 floater = NULL;
9144 break;
9147 /* See if FLOATER is suitable for combination with the
9148 anchor. */
9149 floater_attr = get_attr_pa_combine_type (floater);
9150 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9151 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9152 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9153 && floater_attr == PA_COMBINE_TYPE_FMPY))
9155 /* If ANCHOR and FLOATER can be combined, then we're
9156 done with this pass. */
9157 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9158 SET_DEST (PATTERN (floater)),
9159 XEXP (SET_SRC (PATTERN (floater)), 0),
9160 XEXP (SET_SRC (PATTERN (floater)), 1)))
9161 break;
9164 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9165 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9167 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9169 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9170 SET_DEST (PATTERN (floater)),
9171 XEXP (SET_SRC (PATTERN (floater)), 0),
9172 XEXP (SET_SRC (PATTERN (floater)), 1)))
9173 break;
9175 else
9177 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9178 SET_DEST (PATTERN (floater)),
9179 SET_SRC (PATTERN (floater)),
9180 SET_SRC (PATTERN (floater))))
9181 break;
9186 /* If we didn't find anything on the backwards scan try forwards. */
9187 if (!floater
9188 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9189 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9191 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9193 if (NOTE_P (floater)
9194 || (NONJUMP_INSN_P (floater)
9195 && (GET_CODE (PATTERN (floater)) == USE
9196 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9198 continue;
9200 /* Anything except a regular INSN will stop our search. */
9201 if (! NONJUMP_INSN_P (floater))
9203 floater = NULL;
9204 break;
9207 /* See if FLOATER is suitable for combination with the
9208 anchor. */
9209 floater_attr = get_attr_pa_combine_type (floater);
9210 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9211 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9212 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9213 && floater_attr == PA_COMBINE_TYPE_FMPY))
9215 /* If ANCHOR and FLOATER can be combined, then we're
9216 done with this pass. */
9217 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9218 SET_DEST (PATTERN (floater)),
9219 XEXP (SET_SRC (PATTERN (floater)),
9221 XEXP (SET_SRC (PATTERN (floater)),
9222 1)))
9223 break;
9228 /* FLOATER will be nonzero if we found a suitable floating
9229 insn for combination with ANCHOR. */
9230 if (floater
9231 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9232 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9234 /* Emit the new instruction and delete the old anchor. */
9235 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9236 copy_rtx (PATTERN (floater)));
9237 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9238 emit_insn_before (temp, anchor);
9240 SET_INSN_DELETED (anchor);
9242 /* Emit a special USE insn for FLOATER, then delete
9243 the floating insn. */
9244 temp = copy_rtx (PATTERN (floater));
9245 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9246 delete_insn (floater);
9248 continue;
9250 else if (floater
9251 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9253 /* Emit the new_jump instruction and delete the old anchor. */
9254 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9255 copy_rtx (PATTERN (floater)));
9256 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9257 temp = emit_jump_insn_before (temp, anchor);
9259 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9260 SET_INSN_DELETED (anchor);
9262 /* Emit a special USE insn for FLOATER, then delete
9263 the floating insn. */
9264 temp = copy_rtx (PATTERN (floater));
9265 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9266 delete_insn (floater);
9267 continue;
9273 static int
9274 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9275 int reversed, rtx dest,
9276 rtx src1, rtx src2)
9278 int insn_code_number;
9279 rtx_insn *start, *end;
9281 /* Create a PARALLEL with the patterns of ANCHOR and
9282 FLOATER, try to recognize it, then test constraints
9283 for the resulting pattern.
9285 If the pattern doesn't match or the constraints
9286 aren't met keep searching for a suitable floater
9287 insn. */
9288 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9289 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9290 INSN_CODE (new_rtx) = -1;
9291 insn_code_number = recog_memoized (new_rtx);
9292 basic_block bb = BLOCK_FOR_INSN (anchor);
9293 if (insn_code_number < 0
9294 || (extract_insn (new_rtx),
9295 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9296 return 0;
9298 if (reversed)
9300 start = anchor;
9301 end = floater;
9303 else
9305 start = floater;
9306 end = anchor;
9309 /* There's up to three operands to consider. One
9310 output and two inputs.
9312 The output must not be used between FLOATER & ANCHOR
9313 exclusive. The inputs must not be set between
9314 FLOATER and ANCHOR exclusive. */
9316 if (reg_used_between_p (dest, start, end))
9317 return 0;
9319 if (reg_set_between_p (src1, start, end))
9320 return 0;
9322 if (reg_set_between_p (src2, start, end))
9323 return 0;
9325 /* If we get here, then everything is good. */
9326 return 1;
9329 /* Return nonzero if references for INSN are delayed.
9331 Millicode insns are actually function calls with some special
9332 constraints on arguments and register usage.
9334 Millicode calls always expect their arguments in the integer argument
9335 registers, and always return their result in %r29 (ret1). They
9336 are expected to clobber their arguments, %r1, %r29, and the return
9337 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9339 This function tells reorg that the references to arguments and
9340 millicode calls do not appear to happen until after the millicode call.
9341 This allows reorg to put insns which set the argument registers into the
9342 delay slot of the millicode call -- thus they act more like traditional
9343 CALL_INSNs.
9345 Note we cannot consider side effects of the insn to be delayed because
9346 the branch and link insn will clobber the return pointer. If we happened
9347 to use the return pointer in the delay slot of the call, then we lose.
9349 get_attr_type will try to recognize the given insn, so make sure to
9350 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9351 in particular. */
9353 pa_insn_refs_are_delayed (rtx_insn *insn)
9355 return ((NONJUMP_INSN_P (insn)
9356 && GET_CODE (PATTERN (insn)) != SEQUENCE
9357 && GET_CODE (PATTERN (insn)) != USE
9358 && GET_CODE (PATTERN (insn)) != CLOBBER
9359 && get_attr_type (insn) == TYPE_MILLI));
9362 /* Promote the return value, but not the arguments. */
9364 static machine_mode
9365 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9366 machine_mode mode,
9367 int *punsignedp ATTRIBUTE_UNUSED,
9368 const_tree fntype ATTRIBUTE_UNUSED,
9369 int for_return)
9371 if (for_return == 0)
9372 return mode;
9373 return promote_mode (type, mode, punsignedp);
9376 /* On the HP-PA the value is found in register(s) 28(-29), unless
9377 the mode is SF or DF. Then the value is returned in fr4 (32).
9379 This must perform the same promotions as PROMOTE_MODE, else promoting
9380 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9382 Small structures must be returned in a PARALLEL on PA64 in order
9383 to match the HP Compiler ABI. */
9385 static rtx
9386 pa_function_value (const_tree valtype,
9387 const_tree func ATTRIBUTE_UNUSED,
9388 bool outgoing ATTRIBUTE_UNUSED)
9390 machine_mode valmode;
9392 if (AGGREGATE_TYPE_P (valtype)
9393 || TREE_CODE (valtype) == COMPLEX_TYPE
9394 || TREE_CODE (valtype) == VECTOR_TYPE)
9396 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9398 /* Handle aggregates that fit exactly in a word or double word. */
9399 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9400 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9402 if (TARGET_64BIT)
9404 /* Aggregates with a size less than or equal to 128 bits are
9405 returned in GR 28(-29). They are left justified. The pad
9406 bits are undefined. Larger aggregates are returned in
9407 memory. */
9408 rtx loc[2];
9409 int i, offset = 0;
9410 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9412 for (i = 0; i < ub; i++)
9414 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9415 gen_rtx_REG (DImode, 28 + i),
9416 GEN_INT (offset));
9417 offset += 8;
9420 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9422 else if (valsize > UNITS_PER_WORD)
9424 /* Aggregates 5 to 8 bytes in size are returned in general
9425 registers r28-r29 in the same manner as other non
9426 floating-point objects. The data is right-justified and
9427 zero-extended to 64 bits. This is opposite to the normal
9428 justification used on big endian targets and requires
9429 special treatment. */
9430 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9431 gen_rtx_REG (DImode, 28), const0_rtx);
9432 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9436 if ((INTEGRAL_TYPE_P (valtype)
9437 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9438 || POINTER_TYPE_P (valtype))
9439 valmode = word_mode;
9440 else
9441 valmode = TYPE_MODE (valtype);
9443 if (TREE_CODE (valtype) == REAL_TYPE
9444 && !AGGREGATE_TYPE_P (valtype)
9445 && TYPE_MODE (valtype) != TFmode
9446 && !TARGET_SOFT_FLOAT)
9447 return gen_rtx_REG (valmode, 32);
9449 return gen_rtx_REG (valmode, 28);
9452 /* Implement the TARGET_LIBCALL_VALUE hook. */
9454 static rtx
9455 pa_libcall_value (machine_mode mode,
9456 const_rtx fun ATTRIBUTE_UNUSED)
9458 if (! TARGET_SOFT_FLOAT
9459 && (mode == SFmode || mode == DFmode))
9460 return gen_rtx_REG (mode, 32);
9461 else
9462 return gen_rtx_REG (mode, 28);
9465 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9467 static bool
9468 pa_function_value_regno_p (const unsigned int regno)
9470 if (regno == 28
9471 || (! TARGET_SOFT_FLOAT && regno == 32))
9472 return true;
9474 return false;
9477 /* Update the data in CUM to advance over an argument
9478 of mode MODE and data type TYPE.
9479 (TYPE is null for libcalls where that information may not be available.) */
9481 static void
9482 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9483 const_tree type, bool named ATTRIBUTE_UNUSED)
9485 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9486 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9488 cum->nargs_prototype--;
9489 cum->words += (arg_size
9490 + ((cum->words & 01)
9491 && type != NULL_TREE
9492 && arg_size > 1));
9495 /* Return the location of a parameter that is passed in a register or NULL
9496 if the parameter has any component that is passed in memory.
9498 This is new code and will be pushed to into the net sources after
9499 further testing.
9501 ??? We might want to restructure this so that it looks more like other
9502 ports. */
9503 static rtx
9504 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9505 const_tree type, bool named ATTRIBUTE_UNUSED)
9507 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9508 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9509 int alignment = 0;
9510 int arg_size;
9511 int fpr_reg_base;
9512 int gpr_reg_base;
9513 rtx retval;
9515 if (mode == VOIDmode)
9516 return NULL_RTX;
9518 arg_size = FUNCTION_ARG_SIZE (mode, type);
9520 /* If this arg would be passed partially or totally on the stack, then
9521 this routine should return zero. pa_arg_partial_bytes will
9522 handle arguments which are split between regs and stack slots if
9523 the ABI mandates split arguments. */
9524 if (!TARGET_64BIT)
9526 /* The 32-bit ABI does not split arguments. */
9527 if (cum->words + arg_size > max_arg_words)
9528 return NULL_RTX;
9530 else
9532 if (arg_size > 1)
9533 alignment = cum->words & 1;
9534 if (cum->words + alignment >= max_arg_words)
9535 return NULL_RTX;
9538 /* The 32bit ABIs and the 64bit ABIs are rather different,
9539 particularly in their handling of FP registers. We might
9540 be able to cleverly share code between them, but I'm not
9541 going to bother in the hope that splitting them up results
9542 in code that is more easily understood. */
9544 if (TARGET_64BIT)
9546 /* Advance the base registers to their current locations.
9548 Remember, gprs grow towards smaller register numbers while
9549 fprs grow to higher register numbers. Also remember that
9550 although FP regs are 32-bit addressable, we pretend that
9551 the registers are 64-bits wide. */
9552 gpr_reg_base = 26 - cum->words;
9553 fpr_reg_base = 32 + cum->words;
9555 /* Arguments wider than one word and small aggregates need special
9556 treatment. */
9557 if (arg_size > 1
9558 || mode == BLKmode
9559 || (type && (AGGREGATE_TYPE_P (type)
9560 || TREE_CODE (type) == COMPLEX_TYPE
9561 || TREE_CODE (type) == VECTOR_TYPE)))
9563 /* Double-extended precision (80-bit), quad-precision (128-bit)
9564 and aggregates including complex numbers are aligned on
9565 128-bit boundaries. The first eight 64-bit argument slots
9566 are associated one-to-one, with general registers r26
9567 through r19, and also with floating-point registers fr4
9568 through fr11. Arguments larger than one word are always
9569 passed in general registers.
9571 Using a PARALLEL with a word mode register results in left
9572 justified data on a big-endian target. */
9574 rtx loc[8];
9575 int i, offset = 0, ub = arg_size;
9577 /* Align the base register. */
9578 gpr_reg_base -= alignment;
9580 ub = MIN (ub, max_arg_words - cum->words - alignment);
9581 for (i = 0; i < ub; i++)
9583 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9584 gen_rtx_REG (DImode, gpr_reg_base),
9585 GEN_INT (offset));
9586 gpr_reg_base -= 1;
9587 offset += 8;
9590 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9593 else
9595 /* If the argument is larger than a word, then we know precisely
9596 which registers we must use. */
9597 if (arg_size > 1)
9599 if (cum->words)
9601 gpr_reg_base = 23;
9602 fpr_reg_base = 38;
9604 else
9606 gpr_reg_base = 25;
9607 fpr_reg_base = 34;
9610 /* Structures 5 to 8 bytes in size are passed in the general
9611 registers in the same manner as other non floating-point
9612 objects. The data is right-justified and zero-extended
9613 to 64 bits. This is opposite to the normal justification
9614 used on big endian targets and requires special treatment.
9615 We now define BLOCK_REG_PADDING to pad these objects.
9616 Aggregates, complex and vector types are passed in the same
9617 manner as structures. */
9618 if (mode == BLKmode
9619 || (type && (AGGREGATE_TYPE_P (type)
9620 || TREE_CODE (type) == COMPLEX_TYPE
9621 || TREE_CODE (type) == VECTOR_TYPE)))
9623 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9624 gen_rtx_REG (DImode, gpr_reg_base),
9625 const0_rtx);
9626 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9629 else
9631 /* We have a single word (32 bits). A simple computation
9632 will get us the register #s we need. */
9633 gpr_reg_base = 26 - cum->words;
9634 fpr_reg_base = 32 + 2 * cum->words;
9638 /* Determine if the argument needs to be passed in both general and
9639 floating point registers. */
9640 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9641 /* If we are doing soft-float with portable runtime, then there
9642 is no need to worry about FP regs. */
9643 && !TARGET_SOFT_FLOAT
9644 /* The parameter must be some kind of scalar float, else we just
9645 pass it in integer registers. */
9646 && GET_MODE_CLASS (mode) == MODE_FLOAT
9647 /* The target function must not have a prototype. */
9648 && cum->nargs_prototype <= 0
9649 /* libcalls do not need to pass items in both FP and general
9650 registers. */
9651 && type != NULL_TREE
9652 /* All this hair applies to "outgoing" args only. This includes
9653 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9654 && !cum->incoming)
9655 /* Also pass outgoing floating arguments in both registers in indirect
9656 calls with the 32 bit ABI and the HP assembler since there is no
9657 way to the specify argument locations in static functions. */
9658 || (!TARGET_64BIT
9659 && !TARGET_GAS
9660 && !cum->incoming
9661 && cum->indirect
9662 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9664 retval
9665 = gen_rtx_PARALLEL
9666 (mode,
9667 gen_rtvec (2,
9668 gen_rtx_EXPR_LIST (VOIDmode,
9669 gen_rtx_REG (mode, fpr_reg_base),
9670 const0_rtx),
9671 gen_rtx_EXPR_LIST (VOIDmode,
9672 gen_rtx_REG (mode, gpr_reg_base),
9673 const0_rtx)));
9675 else
9677 /* See if we should pass this parameter in a general register. */
9678 if (TARGET_SOFT_FLOAT
9679 /* Indirect calls in the normal 32bit ABI require all arguments
9680 to be passed in general registers. */
9681 || (!TARGET_PORTABLE_RUNTIME
9682 && !TARGET_64BIT
9683 && !TARGET_ELF32
9684 && cum->indirect)
9685 /* If the parameter is not a scalar floating-point parameter,
9686 then it belongs in GPRs. */
9687 || GET_MODE_CLASS (mode) != MODE_FLOAT
9688 /* Structure with single SFmode field belongs in GPR. */
9689 || (type && AGGREGATE_TYPE_P (type)))
9690 retval = gen_rtx_REG (mode, gpr_reg_base);
9691 else
9692 retval = gen_rtx_REG (mode, fpr_reg_base);
9694 return retval;
9697 /* Arguments larger than one word are double word aligned. */
9699 static unsigned int
9700 pa_function_arg_boundary (machine_mode mode, const_tree type)
9702 bool singleword = (type
9703 ? (integer_zerop (TYPE_SIZE (type))
9704 || !TREE_CONSTANT (TYPE_SIZE (type))
9705 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9706 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9708 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9711 /* If this arg would be passed totally in registers or totally on the stack,
9712 then this routine should return zero. */
9714 static int
9715 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9716 tree type, bool named ATTRIBUTE_UNUSED)
9718 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9719 unsigned int max_arg_words = 8;
9720 unsigned int offset = 0;
9722 if (!TARGET_64BIT)
9723 return 0;
9725 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9726 offset = 1;
9728 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9729 /* Arg fits fully into registers. */
9730 return 0;
9731 else if (cum->words + offset >= max_arg_words)
9732 /* Arg fully on the stack. */
9733 return 0;
9734 else
9735 /* Arg is split. */
9736 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9740 /* A get_unnamed_section callback for switching to the text section.
9742 This function is only used with SOM. Because we don't support
9743 named subspaces, we can only create a new subspace or switch back
9744 to the default text subspace. */
9746 static void
9747 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9749 gcc_assert (TARGET_SOM);
9750 if (TARGET_GAS)
9752 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9754 /* We only want to emit a .nsubspa directive once at the
9755 start of the function. */
9756 cfun->machine->in_nsubspa = 1;
9758 /* Create a new subspace for the text. This provides
9759 better stub placement and one-only functions. */
9760 if (cfun->decl
9761 && DECL_ONE_ONLY (cfun->decl)
9762 && !DECL_WEAK (cfun->decl))
9764 output_section_asm_op ("\t.SPACE $TEXT$\n"
9765 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9766 "ACCESS=44,SORT=24,COMDAT");
9767 return;
9770 else
9772 /* There isn't a current function or the body of the current
9773 function has been completed. So, we are changing to the
9774 text section to output debugging information. Thus, we
9775 need to forget that we are in the text section so that
9776 varasm.c will call us when text_section is selected again. */
9777 gcc_assert (!cfun || !cfun->machine
9778 || cfun->machine->in_nsubspa == 2);
9779 in_section = NULL;
9781 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9782 return;
9784 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9787 /* A get_unnamed_section callback for switching to comdat data
9788 sections. This function is only used with SOM. */
9790 static void
9791 som_output_comdat_data_section_asm_op (const void *data)
9793 in_section = NULL;
9794 output_section_asm_op (data);
9797 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9799 static void
9800 pa_som_asm_init_sections (void)
9802 text_section
9803 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9805 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9806 is not being generated. */
9807 som_readonly_data_section
9808 = get_unnamed_section (0, output_section_asm_op,
9809 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9811 /* When secondary definitions are not supported, SOM makes readonly
9812 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9813 the comdat flag. */
9814 som_one_only_readonly_data_section
9815 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9816 "\t.SPACE $TEXT$\n"
9817 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9818 "ACCESS=0x2c,SORT=16,COMDAT");
9821 /* When secondary definitions are not supported, SOM makes data one-only
9822 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9823 som_one_only_data_section
9824 = get_unnamed_section (SECTION_WRITE,
9825 som_output_comdat_data_section_asm_op,
9826 "\t.SPACE $PRIVATE$\n"
9827 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9828 "ACCESS=31,SORT=24,COMDAT");
9830 if (flag_tm)
9831 som_tm_clone_table_section
9832 = get_unnamed_section (0, output_section_asm_op,
9833 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9835 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9836 which reference data within the $TEXT$ space (for example constant
9837 strings in the $LIT$ subspace).
9839 The assemblers (GAS and HP as) both have problems with handling
9840 the difference of two symbols which is the other correct way to
9841 reference constant data during PIC code generation.
9843 So, there's no way to reference constant data which is in the
9844 $TEXT$ space during PIC generation. Instead place all constant
9845 data into the $PRIVATE$ subspace (this reduces sharing, but it
9846 works correctly). */
9847 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9849 /* We must not have a reference to an external symbol defined in a
9850 shared library in a readonly section, else the SOM linker will
9851 complain.
9853 So, we force exception information into the data section. */
9854 exception_section = data_section;
9857 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9859 static section *
9860 pa_som_tm_clone_table_section (void)
9862 return som_tm_clone_table_section;
9865 /* On hpux10, the linker will give an error if we have a reference
9866 in the read-only data section to a symbol defined in a shared
9867 library. Therefore, expressions that might require a reloc can
9868 not be placed in the read-only data section. */
9870 static section *
9871 pa_select_section (tree exp, int reloc,
9872 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9874 if (TREE_CODE (exp) == VAR_DECL
9875 && TREE_READONLY (exp)
9876 && !TREE_THIS_VOLATILE (exp)
9877 && DECL_INITIAL (exp)
9878 && (DECL_INITIAL (exp) == error_mark_node
9879 || TREE_CONSTANT (DECL_INITIAL (exp)))
9880 && !reloc)
9882 if (TARGET_SOM
9883 && DECL_ONE_ONLY (exp)
9884 && !DECL_WEAK (exp))
9885 return som_one_only_readonly_data_section;
9886 else
9887 return readonly_data_section;
9889 else if (CONSTANT_CLASS_P (exp) && !reloc)
9890 return readonly_data_section;
9891 else if (TARGET_SOM
9892 && TREE_CODE (exp) == VAR_DECL
9893 && DECL_ONE_ONLY (exp)
9894 && !DECL_WEAK (exp))
9895 return som_one_only_data_section;
9896 else
9897 return data_section;
9900 /* Implement pa_reloc_rw_mask. */
9902 static int
9903 pa_reloc_rw_mask (void)
9905 /* We force (const (plus (symbol) (const_int))) to memory when the
9906 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9907 handle this construct in read-only memory and we want to avoid
9908 this for ELF. So, we always force an RTX needing relocation to
9909 the data section. */
9910 return 3;
9913 static void
9914 pa_globalize_label (FILE *stream, const char *name)
9916 /* We only handle DATA objects here, functions are globalized in
9917 ASM_DECLARE_FUNCTION_NAME. */
9918 if (! FUNCTION_NAME_P (name))
9920 fputs ("\t.EXPORT ", stream);
9921 assemble_name (stream, name);
9922 fputs (",DATA\n", stream);
9926 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9928 static rtx
9929 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9930 int incoming ATTRIBUTE_UNUSED)
9932 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9935 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9937 bool
9938 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9940 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9941 PA64 ABI says that objects larger than 128 bits are returned in memory.
9942 Note, int_size_in_bytes can return -1 if the size of the object is
9943 variable or larger than the maximum value that can be expressed as
9944 a HOST_WIDE_INT. It can also return zero for an empty type. The
9945 simplest way to handle variable and empty types is to pass them in
9946 memory. This avoids problems in defining the boundaries of argument
9947 slots, allocating registers, etc. */
9948 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9949 || int_size_in_bytes (type) <= 0);
9952 /* Structure to hold declaration and name of external symbols that are
9953 emitted by GCC. We generate a vector of these symbols and output them
9954 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9955 This avoids putting out names that are never really used. */
9957 typedef struct GTY(()) extern_symbol
9959 tree decl;
9960 const char *name;
9961 } extern_symbol;
9963 /* Define gc'd vector type for extern_symbol. */
9965 /* Vector of extern_symbol pointers. */
9966 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9968 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9969 /* Mark DECL (name NAME) as an external reference (assembler output
9970 file FILE). This saves the names to output at the end of the file
9971 if actually referenced. */
9973 void
9974 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9976 gcc_assert (file == asm_out_file);
9977 extern_symbol p = {decl, name};
9978 vec_safe_push (extern_symbols, p);
9980 #endif
9982 /* Output text required at the end of an assembler file.
9983 This includes deferred plabels and .import directives for
9984 all external symbols that were actually referenced. */
9986 static void
9987 pa_file_end (void)
9989 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9990 unsigned int i;
9991 extern_symbol *p;
9993 if (!NO_DEFERRED_PROFILE_COUNTERS)
9994 output_deferred_profile_counters ();
9995 #endif
9997 output_deferred_plabels ();
9999 #ifdef ASM_OUTPUT_EXTERNAL_REAL
10000 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
10002 tree decl = p->decl;
10004 if (!TREE_ASM_WRITTEN (decl)
10005 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
10006 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
10009 vec_free (extern_symbols);
10010 #endif
10012 if (NEED_INDICATE_EXEC_STACK)
10013 file_end_indicate_exec_stack ();
10016 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10018 static bool
10019 pa_can_change_mode_class (machine_mode from, machine_mode to,
10020 reg_class_t rclass)
10022 if (from == to)
10023 return true;
10025 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
10026 return true;
10028 /* Reject changes to/from modes with zero size. */
10029 if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
10030 return false;
10032 /* Reject changes to/from complex and vector modes. */
10033 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
10034 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
10035 return false;
10037 /* There is no way to load QImode or HImode values directly from memory
10038 to a FP register. SImode loads to the FP registers are not zero
10039 extended. On the 64-bit target, this conflicts with the definition
10040 of LOAD_EXTEND_OP. Thus, we can't allow changing between modes with
10041 different sizes in the floating-point registers. */
10042 if (MAYBE_FP_REG_CLASS_P (rclass))
10043 return false;
10045 /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
10046 in specific sets of registers. Thus, we cannot allow changing
10047 to a larger mode when it's larger than a word. */
10048 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10049 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10050 return false;
10052 return true;
10055 /* Implement TARGET_MODES_TIEABLE_P.
10057 We should return FALSE for QImode and HImode because these modes
10058 are not ok in the floating-point registers. However, this prevents
10059 tieing these modes to SImode and DImode in the general registers.
10060 So, this isn't a good idea. We rely on TARGET_HARD_REGNO_MODE_OK and
10061 TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
10062 in the floating-point registers. */
10064 static bool
10065 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10067 /* Don't tie modes in different classes. */
10068 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10069 return false;
10071 return true;
10075 /* Length in units of the trampoline instruction code. */
10077 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10080 /* Output assembler code for a block containing the constant parts
10081 of a trampoline, leaving space for the variable parts.\
10083 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10084 and then branches to the specified routine.
10086 This code template is copied from text segment to stack location
10087 and then patched with pa_trampoline_init to contain valid values,
10088 and then entered as a subroutine.
10090 It is best to keep this as small as possible to avoid having to
10091 flush multiple lines in the cache. */
10093 static void
10094 pa_asm_trampoline_template (FILE *f)
10096 if (!TARGET_64BIT)
10098 fputs ("\tldw 36(%r22),%r21\n", f);
10099 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10100 if (ASSEMBLER_DIALECT == 0)
10101 fputs ("\tdepi 0,31,2,%r21\n", f);
10102 else
10103 fputs ("\tdepwi 0,31,2,%r21\n", f);
10104 fputs ("\tldw 4(%r21),%r19\n", f);
10105 fputs ("\tldw 0(%r21),%r21\n", f);
10106 if (TARGET_PA_20)
10108 fputs ("\tbve (%r21)\n", f);
10109 fputs ("\tldw 40(%r22),%r29\n", f);
10110 fputs ("\t.word 0\n", f);
10111 fputs ("\t.word 0\n", f);
10113 else
10115 fputs ("\tldsid (%r21),%r1\n", f);
10116 fputs ("\tmtsp %r1,%sr0\n", f);
10117 fputs ("\tbe 0(%sr0,%r21)\n", f);
10118 fputs ("\tldw 40(%r22),%r29\n", f);
10120 fputs ("\t.word 0\n", f);
10121 fputs ("\t.word 0\n", f);
10122 fputs ("\t.word 0\n", f);
10123 fputs ("\t.word 0\n", f);
10125 else
10127 fputs ("\t.dword 0\n", f);
10128 fputs ("\t.dword 0\n", f);
10129 fputs ("\t.dword 0\n", f);
10130 fputs ("\t.dword 0\n", f);
10131 fputs ("\tmfia %r31\n", f);
10132 fputs ("\tldd 24(%r31),%r1\n", f);
10133 fputs ("\tldd 24(%r1),%r27\n", f);
10134 fputs ("\tldd 16(%r1),%r1\n", f);
10135 fputs ("\tbve (%r1)\n", f);
10136 fputs ("\tldd 32(%r31),%r31\n", f);
10137 fputs ("\t.dword 0 ; fptr\n", f);
10138 fputs ("\t.dword 0 ; static link\n", f);
10142 /* Emit RTL insns to initialize the variable parts of a trampoline.
10143 FNADDR is an RTX for the address of the function's pure code.
10144 CXT is an RTX for the static chain value for the function.
10146 Move the function address to the trampoline template at offset 36.
10147 Move the static chain value to trampoline template at offset 40.
10148 Move the trampoline address to trampoline template at offset 44.
10149 Move r19 to trampoline template at offset 48. The latter two
10150 words create a plabel for the indirect call to the trampoline.
10152 A similar sequence is used for the 64-bit port but the plabel is
10153 at the beginning of the trampoline.
10155 Finally, the cache entries for the trampoline code are flushed.
10156 This is necessary to ensure that the trampoline instruction sequence
10157 is written to memory prior to any attempts at prefetching the code
10158 sequence. */
10160 static void
10161 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10163 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10164 rtx start_addr = gen_reg_rtx (Pmode);
10165 rtx end_addr = gen_reg_rtx (Pmode);
10166 rtx line_length = gen_reg_rtx (Pmode);
10167 rtx r_tramp, tmp;
10169 emit_block_move (m_tramp, assemble_trampoline_template (),
10170 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10171 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10173 if (!TARGET_64BIT)
10175 tmp = adjust_address (m_tramp, Pmode, 36);
10176 emit_move_insn (tmp, fnaddr);
10177 tmp = adjust_address (m_tramp, Pmode, 40);
10178 emit_move_insn (tmp, chain_value);
10180 /* Create a fat pointer for the trampoline. */
10181 tmp = adjust_address (m_tramp, Pmode, 44);
10182 emit_move_insn (tmp, r_tramp);
10183 tmp = adjust_address (m_tramp, Pmode, 48);
10184 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10186 /* fdc and fic only use registers for the address to flush,
10187 they do not accept integer displacements. We align the
10188 start and end addresses to the beginning of their respective
10189 cache lines to minimize the number of lines flushed. */
10190 emit_insn (gen_andsi3 (start_addr, r_tramp,
10191 GEN_INT (-MIN_CACHELINE_SIZE)));
10192 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10193 TRAMPOLINE_CODE_SIZE-1));
10194 emit_insn (gen_andsi3 (end_addr, tmp,
10195 GEN_INT (-MIN_CACHELINE_SIZE)));
10196 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10197 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10198 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10199 gen_reg_rtx (Pmode),
10200 gen_reg_rtx (Pmode)));
10202 else
10204 tmp = adjust_address (m_tramp, Pmode, 56);
10205 emit_move_insn (tmp, fnaddr);
10206 tmp = adjust_address (m_tramp, Pmode, 64);
10207 emit_move_insn (tmp, chain_value);
10209 /* Create a fat pointer for the trampoline. */
10210 tmp = adjust_address (m_tramp, Pmode, 16);
10211 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10212 r_tramp, 32)));
10213 tmp = adjust_address (m_tramp, Pmode, 24);
10214 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10216 /* fdc and fic only use registers for the address to flush,
10217 they do not accept integer displacements. We align the
10218 start and end addresses to the beginning of their respective
10219 cache lines to minimize the number of lines flushed. */
10220 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10221 emit_insn (gen_anddi3 (start_addr, tmp,
10222 GEN_INT (-MIN_CACHELINE_SIZE)));
10223 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10224 TRAMPOLINE_CODE_SIZE - 1));
10225 emit_insn (gen_anddi3 (end_addr, tmp,
10226 GEN_INT (-MIN_CACHELINE_SIZE)));
10227 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10228 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10229 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10230 gen_reg_rtx (Pmode),
10231 gen_reg_rtx (Pmode)));
10234 #ifdef HAVE_ENABLE_EXECUTE_STACK
10235  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10236 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10237 #endif
10240 /* Perform any machine-specific adjustment in the address of the trampoline.
10241 ADDR contains the address that was passed to pa_trampoline_init.
10242 Adjust the trampoline address to point to the plabel at offset 44. */
10244 static rtx
10245 pa_trampoline_adjust_address (rtx addr)
10247 if (!TARGET_64BIT)
10248 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10249 return addr;
10252 static rtx
10253 pa_delegitimize_address (rtx orig_x)
10255 rtx x = delegitimize_mem_from_attrs (orig_x);
10257 if (GET_CODE (x) == LO_SUM
10258 && GET_CODE (XEXP (x, 1)) == UNSPEC
10259 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10260 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10261 return x;
10264 static rtx
10265 pa_internal_arg_pointer (void)
10267 /* The argument pointer and the hard frame pointer are the same in
10268 the 32-bit runtime, so we don't need a copy. */
10269 if (TARGET_64BIT)
10270 return copy_to_reg (virtual_incoming_args_rtx);
10271 else
10272 return virtual_incoming_args_rtx;
10275 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10276 Frame pointer elimination is automatically handled. */
10278 static bool
10279 pa_can_eliminate (const int from, const int to)
10281 /* The argument cannot be eliminated in the 64-bit runtime. */
10282 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10283 return false;
10285 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10286 ? ! frame_pointer_needed
10287 : true);
10290 /* Define the offset between two registers, FROM to be eliminated and its
10291 replacement TO, at the start of a routine. */
10292 HOST_WIDE_INT
10293 pa_initial_elimination_offset (int from, int to)
10295 HOST_WIDE_INT offset;
10297 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10298 && to == STACK_POINTER_REGNUM)
10299 offset = -pa_compute_frame_size (get_frame_size (), 0);
10300 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10301 offset = 0;
10302 else
10303 gcc_unreachable ();
10305 return offset;
10308 static void
10309 pa_conditional_register_usage (void)
10311 int i;
10313 if (!TARGET_64BIT && !TARGET_PA_11)
10315 for (i = 56; i <= FP_REG_LAST; i++)
10316 fixed_regs[i] = call_used_regs[i] = 1;
10317 for (i = 33; i < 56; i += 2)
10318 fixed_regs[i] = call_used_regs[i] = 1;
10320 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10322 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10323 fixed_regs[i] = call_used_regs[i] = 1;
10325 if (flag_pic)
10326 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10329 /* Target hook for c_mode_for_suffix. */
10331 static machine_mode
10332 pa_c_mode_for_suffix (char suffix)
10334 if (HPUX_LONG_DOUBLE_LIBRARY)
10336 if (suffix == 'q')
10337 return TFmode;
10340 return VOIDmode;
10343 /* Target hook for function_section. */
10345 static section *
10346 pa_function_section (tree decl, enum node_frequency freq,
10347 bool startup, bool exit)
10349 /* Put functions in text section if target doesn't have named sections. */
10350 if (!targetm_common.have_named_sections)
10351 return text_section;
10353 /* Force nested functions into the same section as the containing
10354 function. */
10355 if (decl
10356 && DECL_SECTION_NAME (decl) == NULL
10357 && DECL_CONTEXT (decl) != NULL_TREE
10358 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10359 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10360 return function_section (DECL_CONTEXT (decl));
10362 /* Otherwise, use the default function section. */
10363 return default_function_section (decl, freq, startup, exit);
10366 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10368 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10369 that need more than three instructions to load prior to reload. This
10370 limit is somewhat arbitrary. It takes three instructions to load a
10371 CONST_INT from memory but two are memory accesses. It may be better
10372 to increase the allowed range for CONST_INTS. We may also be able
10373 to handle CONST_DOUBLES. */
10375 static bool
10376 pa_legitimate_constant_p (machine_mode mode, rtx x)
10378 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10379 return false;
10381 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10382 return false;
10384 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10385 legitimate constants. The other variants can't be handled by
10386 the move patterns after reload starts. */
10387 if (tls_referenced_p (x))
10388 return false;
10390 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10391 return false;
10393 if (TARGET_64BIT
10394 && HOST_BITS_PER_WIDE_INT > 32
10395 && GET_CODE (x) == CONST_INT
10396 && !reload_in_progress
10397 && !reload_completed
10398 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10399 && !pa_cint_ok_for_move (UINTVAL (x)))
10400 return false;
10402 if (function_label_operand (x, mode))
10403 return false;
10405 return true;
10408 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10410 static unsigned int
10411 pa_section_type_flags (tree decl, const char *name, int reloc)
10413 unsigned int flags;
10415 flags = default_section_type_flags (decl, name, reloc);
10417 /* Function labels are placed in the constant pool. This can
10418 cause a section conflict if decls are put in ".data.rel.ro"
10419 or ".data.rel.ro.local" using the __attribute__ construct. */
10420 if (strcmp (name, ".data.rel.ro") == 0
10421 || strcmp (name, ".data.rel.ro.local") == 0)
10422 flags |= SECTION_WRITE | SECTION_RELRO;
10424 return flags;
10427 /* pa_legitimate_address_p recognizes an RTL expression that is a
10428 valid memory address for an instruction. The MODE argument is the
10429 machine mode for the MEM expression that wants to use this address.
10431 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10432 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10433 available with floating point loads and stores, and integer loads.
10434 We get better code by allowing indexed addresses in the initial
10435 RTL generation.
10437 The acceptance of indexed addresses as legitimate implies that we
10438 must provide patterns for doing indexed integer stores, or the move
10439 expanders must force the address of an indexed store to a register.
10440 We have adopted the latter approach.
10442 Another function of pa_legitimate_address_p is to ensure that
10443 the base register is a valid pointer for indexed instructions.
10444 On targets that have non-equivalent space registers, we have to
10445 know at the time of assembler output which register in a REG+REG
10446 pair is the base register. The REG_POINTER flag is sometimes lost
10447 in reload and the following passes, so it can't be relied on during
10448 code generation. Thus, we either have to canonicalize the order
10449 of the registers in REG+REG indexed addresses, or treat REG+REG
10450 addresses separately and provide patterns for both permutations.
10452 The latter approach requires several hundred additional lines of
10453 code in pa.md. The downside to canonicalizing is that a PLUS
10454 in the wrong order can't combine to form to make a scaled indexed
10455 memory operand. As we won't need to canonicalize the operands if
10456 the REG_POINTER lossage can be fixed, it seems better canonicalize.
10458 We initially break out scaled indexed addresses in canonical order
10459 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10460 scaled indexed addresses during RTL generation. However, fold_rtx
10461 has its own opinion on how the operands of a PLUS should be ordered.
10462 If one of the operands is equivalent to a constant, it will make
10463 that operand the second operand. As the base register is likely to
10464 be equivalent to a SYMBOL_REF, we have made it the second operand.
10466 pa_legitimate_address_p accepts REG+REG as legitimate when the
10467 operands are in the order INDEX+BASE on targets with non-equivalent
10468 space registers, and in any order on targets with equivalent space
10469 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10471 We treat a SYMBOL_REF as legitimate if it is part of the current
10472 function's constant-pool, because such addresses can actually be
10473 output as REG+SMALLINT. */
10475 static bool
10476 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10478 if ((REG_P (x)
10479 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10480 : REG_OK_FOR_BASE_P (x)))
10481 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10482 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10483 && REG_P (XEXP (x, 0))
10484 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10485 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10486 return true;
10488 if (GET_CODE (x) == PLUS)
10490 rtx base, index;
10492 /* For REG+REG, the base register should be in XEXP (x, 1),
10493 so check it first. */
10494 if (REG_P (XEXP (x, 1))
10495 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10496 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10497 base = XEXP (x, 1), index = XEXP (x, 0);
10498 else if (REG_P (XEXP (x, 0))
10499 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10500 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10501 base = XEXP (x, 0), index = XEXP (x, 1);
10502 else
10503 return false;
10505 if (GET_CODE (index) == CONST_INT)
10507 if (INT_5_BITS (index))
10508 return true;
10510 /* When INT14_OK_STRICT is false, a secondary reload is needed
10511 to adjust the displacement of SImode and DImode floating point
10512 instructions but this may fail when the register also needs
10513 reloading. So, we return false when STRICT is true. We
10514 also reject long displacements for float mode addresses since
10515 the majority of accesses will use floating point instructions
10516 that don't support 14-bit offsets. */
10517 if (!INT14_OK_STRICT
10518 && (strict || !(reload_in_progress || reload_completed))
10519 && mode != QImode
10520 && mode != HImode)
10521 return false;
10523 return base14_operand (index, mode);
10526 if (!TARGET_DISABLE_INDEXING
10527 /* Only accept the "canonical" INDEX+BASE operand order
10528 on targets with non-equivalent space registers. */
10529 && (TARGET_NO_SPACE_REGS
10530 ? REG_P (index)
10531 : (base == XEXP (x, 1) && REG_P (index)
10532 && (reload_completed
10533 || (reload_in_progress && HARD_REGISTER_P (base))
10534 || REG_POINTER (base))
10535 && (reload_completed
10536 || (reload_in_progress && HARD_REGISTER_P (index))
10537 || !REG_POINTER (index))))
10538 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10539 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10540 : REG_OK_FOR_INDEX_P (index))
10541 && borx_reg_operand (base, Pmode)
10542 && borx_reg_operand (index, Pmode))
10543 return true;
10545 if (!TARGET_DISABLE_INDEXING
10546 && GET_CODE (index) == MULT
10547 /* Only accept base operands with the REG_POINTER flag prior to
10548 reload on targets with non-equivalent space registers. */
10549 && (TARGET_NO_SPACE_REGS
10550 || (base == XEXP (x, 1)
10551 && (reload_completed
10552 || (reload_in_progress && HARD_REGISTER_P (base))
10553 || REG_POINTER (base))))
10554 && REG_P (XEXP (index, 0))
10555 && GET_MODE (XEXP (index, 0)) == Pmode
10556 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10557 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10558 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10559 && GET_CODE (XEXP (index, 1)) == CONST_INT
10560 && INTVAL (XEXP (index, 1))
10561 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10562 && borx_reg_operand (base, Pmode))
10563 return true;
10565 return false;
10568 if (GET_CODE (x) == LO_SUM)
10570 rtx y = XEXP (x, 0);
10572 if (GET_CODE (y) == SUBREG)
10573 y = SUBREG_REG (y);
10575 if (REG_P (y)
10576 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10577 : REG_OK_FOR_BASE_P (y)))
10579 /* Needed for -fPIC */
10580 if (mode == Pmode
10581 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10582 return true;
10584 if (!INT14_OK_STRICT
10585 && (strict || !(reload_in_progress || reload_completed))
10586 && mode != QImode
10587 && mode != HImode)
10588 return false;
10590 if (CONSTANT_P (XEXP (x, 1)))
10591 return true;
10593 return false;
10596 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10597 return true;
10599 return false;
10602 /* Look for machine dependent ways to make the invalid address AD a
10603 valid address.
10605 For the PA, transform:
10607 memory(X + <large int>)
10609 into:
10611 if (<large int> & mask) >= 16
10612 Y = (<large int> & ~mask) + mask + 1 Round up.
10613 else
10614 Y = (<large int> & ~mask) Round down.
10615 Z = X + Y
10616 memory (Z + (<large int> - Y));
10618 This makes reload inheritance and reload_cse work better since Z
10619 can be reused.
10621 There may be more opportunities to improve code with this hook. */
10624 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10625 int opnum, int type,
10626 int ind_levels ATTRIBUTE_UNUSED)
10628 long offset, newoffset, mask;
10629 rtx new_rtx, temp = NULL_RTX;
10631 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10632 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10634 if (optimize && GET_CODE (ad) == PLUS)
10635 temp = simplify_binary_operation (PLUS, Pmode,
10636 XEXP (ad, 0), XEXP (ad, 1));
10638 new_rtx = temp ? temp : ad;
10640 if (optimize
10641 && GET_CODE (new_rtx) == PLUS
10642 && GET_CODE (XEXP (new_rtx, 0)) == REG
10643 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10645 offset = INTVAL (XEXP ((new_rtx), 1));
10647 /* Choose rounding direction. Round up if we are >= halfway. */
10648 if ((offset & mask) >= ((mask + 1) / 2))
10649 newoffset = (offset & ~mask) + mask + 1;
10650 else
10651 newoffset = offset & ~mask;
10653 /* Ensure that long displacements are aligned. */
10654 if (mask == 0x3fff
10655 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10656 || (TARGET_64BIT && (mode) == DImode)))
10657 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10659 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10661 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10662 GEN_INT (newoffset));
10663 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10664 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10665 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10666 opnum, (enum reload_type) type);
10667 return ad;
10671 return NULL_RTX;
10674 /* Output address vector. */
10676 void
10677 pa_output_addr_vec (rtx lab, rtx body)
10679 int idx, vlen = XVECLEN (body, 0);
10681 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10682 if (TARGET_GAS)
10683 fputs ("\t.begin_brtab\n", asm_out_file);
10684 for (idx = 0; idx < vlen; idx++)
10686 ASM_OUTPUT_ADDR_VEC_ELT
10687 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10689 if (TARGET_GAS)
10690 fputs ("\t.end_brtab\n", asm_out_file);
10693 /* Output address difference vector. */
10695 void
10696 pa_output_addr_diff_vec (rtx lab, rtx body)
10698 rtx base = XEXP (XEXP (body, 0), 0);
10699 int idx, vlen = XVECLEN (body, 1);
10701 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10702 if (TARGET_GAS)
10703 fputs ("\t.begin_brtab\n", asm_out_file);
10704 for (idx = 0; idx < vlen; idx++)
10706 ASM_OUTPUT_ADDR_DIFF_ELT
10707 (asm_out_file,
10708 body,
10709 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10710 CODE_LABEL_NUMBER (base));
10712 if (TARGET_GAS)
10713 fputs ("\t.end_brtab\n", asm_out_file);
10716 /* This is a helper function for the other atomic operations. This function
10717 emits a loop that contains SEQ that iterates until a compare-and-swap
10718 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10719 a set of instructions that takes a value from OLD_REG as an input and
10720 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10721 set to the current contents of MEM. After SEQ, a compare-and-swap will
10722 attempt to update MEM with NEW_REG. The function returns true when the
10723 loop was generated successfully. */
10725 static bool
10726 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10728 machine_mode mode = GET_MODE (mem);
10729 rtx_code_label *label;
10730 rtx cmp_reg, success, oldval;
10732 /* The loop we want to generate looks like
10734 cmp_reg = mem;
10735 label:
10736 old_reg = cmp_reg;
10737 seq;
10738 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
10739 if (success)
10740 goto label;
10742 Note that we only do the plain load from memory once. Subsequent
10743 iterations use the value loaded by the compare-and-swap pattern. */
10745 label = gen_label_rtx ();
10746 cmp_reg = gen_reg_rtx (mode);
10748 emit_move_insn (cmp_reg, mem);
10749 emit_label (label);
10750 emit_move_insn (old_reg, cmp_reg);
10751 if (seq)
10752 emit_insn (seq);
10754 success = NULL_RTX;
10755 oldval = cmp_reg;
10756 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10757 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10758 MEMMODEL_RELAXED))
10759 return false;
10761 if (oldval != cmp_reg)
10762 emit_move_insn (cmp_reg, oldval);
10764 /* Mark this jump predicted not taken. */
10765 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10766 GET_MODE (success), 1, label,
10767 profile_probability::guessed_never ());
10768 return true;
10771 /* This function tries to implement an atomic exchange operation using a
10772 compare_and_swap loop. VAL is written to *MEM. The previous contents of
10773 *MEM are returned, using TARGET if possible. No memory model is required
10774 since a compare_and_swap loop is seq-cst. */
10777 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10779 machine_mode mode = GET_MODE (mem);
10781 if (can_compare_and_swap_p (mode, true))
10783 if (!target || !register_operand (target, mode))
10784 target = gen_reg_rtx (mode);
10785 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10786 return target;
10789 return NULL_RTX;
10792 /* Implement TARGET_CALLEE_COPIES. The callee is responsible for copying
10793 arguments passed by hidden reference in the 32-bit HP runtime. Users
10794 can override this behavior for better compatibility with openmp at the
10795 risk of library incompatibilities. Arguments are always passed by value
10796 in the 64-bit HP runtime. */
10798 static bool
10799 pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
10800 machine_mode mode ATTRIBUTE_UNUSED,
10801 const_tree type ATTRIBUTE_UNUSED,
10802 bool named ATTRIBUTE_UNUSED)
10804 return !TARGET_CALLER_COPIES;
10807 /* Implement TARGET_HARD_REGNO_NREGS. */
10809 static unsigned int
10810 pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
10812 return PA_HARD_REGNO_NREGS (regno, mode);
10815 /* Implement TARGET_HARD_REGNO_MODE_OK. */
10817 static bool
10818 pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10820 return PA_HARD_REGNO_MODE_OK (regno, mode);
10823 /* Implement TARGET_STARTING_FRAME_OFFSET.
10825 On the 32-bit ports, we reserve one slot for the previous frame
10826 pointer and one fill slot. The fill slot is for compatibility
10827 with HP compiled programs. On the 64-bit ports, we reserve one
10828 slot for the previous frame pointer. */
10830 static HOST_WIDE_INT
10831 pa_starting_frame_offset (void)
10833 return 8;
10836 #include "gt-pa.h"