/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "except.h"
#include "explow.h"
#include "expr.h"
#include "reload.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "opts.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
			     rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *);
static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (scalar_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
static void pa_file_end (void);
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, machine_mode,
			    const_tree, bool);
static pad_direction pa_function_arg_padding (machine_mode, const_tree);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					machine_mode,
					secondary_reload_info *);
static bool pa_secondary_memory_needed (machine_mode,
					reg_class_t, reg_class_t);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
					      machine_mode, int *,
					      const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);
static bool pa_callee_copies (cumulative_args_t, machine_mode,
			      const_tree, bool);
static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
static bool pa_modes_tieable_p (machine_mode, machine_mode);
static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
static HOST_WIDE_INT pa_starting_frame_offset (void);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END pa_file_end

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES pa_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P pa_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef TARGET_STARTING_FRAME_OFFSET
#define TARGET_STARTING_FRAME_OFFSET pa_starting_frame_offset

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of %<-mfixed-range%> must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
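
/* Illustrative example (not part of the original sources): with
   -mfixed-range=fr4-fr7,fr20-fr21 the loop above runs twice, marking
   fr4..fr7 and then fr20..fr21 as fixed and call-used.  The final
   check sets MASK_DISABLE_FPREGS only when every register from
   FP_REG_FIRST to FP_REG_LAST has been fixed, as a kernel build
   might do with -mfixed-range=fr4-fr31.  */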

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "%<-g%> is only supported when using GAS on this processor,");
      warning (0, "%<-g%> option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "%<-freorder-blocks-and-partition%> does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = const_double_from_real_value (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
{
  unsigned HOST_WIDE_INT x;

  x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
  return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
}
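
/* Illustrative example (not part of the original sources): ldil
   loads a left-justified 21-bit immediate, so for ival = 0x12345000
   the masked value x above is 0 and the value is accepted: its low
   11 bits are clear and bit 31 is zero, so it does not change sign
   when extended to 64 bits.  By contrast, ival = 0x80000000 (with
   the upper 32 bits clear) yields x = 0x80000000, which matches
   neither accepted pattern, so it is rejected.  */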

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
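
/* Worked example (illustrative, not from the original sources): for
   x = 0x1e0 (binary 111100000), lsb_mask = 0x20 and
   t = (0x1e + 0x20) & ~0x1f = 0x20, a power of two, so the value is
   accepted; the run of ones can be produced by depositing the 5-bit
   immediate 01111 at the right position.  For x = 0x501, t works out
   to 0x51, which is not a power of two, so the value is rejected.  */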

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
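
/* Illustrative examples (not from the original sources): for
   pa_and_mask_p, a sign-extended mask of 0xfffff00f gives
   ~mask = 0xff0, a single contiguous run of ones; adding its least
   significant bit produces 0x1000, a power of two, so depi/extru can
   implement the AND.  A mask such as 0xf0f0f0f0 leaves several
   separate runs in ~mask and is rejected.  pa_ior_mask_p applies the
   same trick to MASK itself, accepting contiguous runs like
   0x00ffff00.  */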

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx_insn *insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx_insn *insn;
      rtx tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
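
/* Sketch (illustrative, not emitted verbatim anywhere): for a plain
   SYMBOL_REF the sequence built above has the shape

     (set tmp (plus pic_offset_table_rtx (high (symbol_ref X))))
     (set reg (mem (lo_sum tmp (unspec [(symbol_ref X)]
				       UNSPEC_DLTIND14R))))

   i.e. the address is loaded from the DLT slot addressed relative to
   the PIC register.  For a function label, the same load is done from
   a plabel forced into the constant pool, and a second load through
   the result fetches the function descriptor address.  */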

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, tmp, t1, t2, tp;
  rtx_insn *insn;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
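
/* Recap of the cases above (illustrative): GLOBAL_DYNAMIC emits a
   tgd_load of the symbol and calls __tls_get_addr; LOCAL_DYNAMIC
   makes that call once for the module base, wrapping it in a libcall
   block (UNSPEC_TLSLDBASE) so the base can be shared between
   variables, then adds the per-variable offset via tld_offset_load;
   INITIAL_EXEC adds an offset loaded with tie_load to the thread
   pointer obtained from tp_load; LOCAL_EXEC folds the offset in
   directly with tle_load.  */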

/* Helper for hppa_legitimize_address.  Given X, return true if it
   is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.

   These respectively represent canonical shift-add rtxs or scaled
   memory addresses.  */
static bool
mem_shadd_or_shadd_rtx_p (rtx x)
{
  return ((GET_CODE (x) == ASHIFT
	   || GET_CODE (x) == MULT)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && ((GET_CODE (x) == ASHIFT
	       && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
	      || (GET_CODE (x) == MULT
		  && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Note that the addresses passed into hppa_legitimize_address always
   come from a MEM, so we only have to match the MULT form on incoming
   addresses.  But to be future proof we also match the ASHIFT form.

   However, this routine always places those shift-add sequences into
   registers, so we have to generate the ASHIFT form as our output.

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
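
/* Worked example of the scheme above (illustrative, not from the
   original sources): for a MODE_INT reference memory (X + 0x5432)
   the mask is 0x3fff.  Since 0x5432 & 0x3fff = 0x1432 is below the
   halfway point 0x2000, we round down to Y = 0x4000, compute
   Z = X + 0x4000, and emit memory (Z + 0x1432), where 0x1432 fits in
   a 14-bit displacement.  A nearby reference such as X + 0x5440
   computes the same Z, which CSE can then share.  */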

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine cannot
	 handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (mem_shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
      if (GET_CODE (XEXP (x, 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      rtx reg1, reg2;
      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg2,
						      GEN_INT (shift_val)),
				      reg1));
    }

  /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
      && (mode == SFmode || mode == DFmode))
    {
      int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));

      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      /* Try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_ASHIFT (Pmode,
					      XEXP (XEXP (XEXP (x, 0), 0), 0),
					      GEN_INT (shift_val)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));
	  val /= (1 << shift_val);

	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_ASHIFT (Pmode, reg1,
						    GEN_INT (shift_val)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode,
			    gen_rtx_PLUS (Pmode,
					  gen_rtx_ASHIFT (Pmode, reg2,
							  GEN_INT (shift_val)),
					  reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg1,
						      GEN_INT (shift_val)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (mem_shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_ASHIFT (Pmode,
							 reg2,
							 GEN_INT (shift_val)),
					 reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_ASHIFT (Pmode, regx2,
							 GEN_INT (shift_val)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
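
/* Example costs from the function above (illustrative): a copy
   between two general registers or two FP registers costs 2; a copy
   between a general register and an FP register costs 16 because it
   must bounce through memory; a move out of SAR (SHIFT_REGS) costs
   0x100 so the allocator avoids it whenever possible.  */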

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
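
/* Example (illustrative): on a 32-bit target a DImode multiply gets
   factor = 8 / 4 = 2, so it is costed at 2 * 2 * COSTS_N_INSNS (8)
   when PA 1.1 FP multiplies are available, or
   2 * 2 * COSTS_N_INSNS (20) when the multiplication has to be
   synthesized without the FP unit.  */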

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
1659 if (!TARGET_NO_SPACE_REGS
1660 && !cse_not_expected
1661 && GET_CODE (operand1) == MEM
1662 && GET_CODE (XEXP (operand1, 0)) == PLUS
1663 && REG_P (XEXP (XEXP (operand1, 0), 0))
1664 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1665 operand1
1666 = replace_equiv_address (operand1,
1667 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1669 if (scratch_reg
1670 && reload_in_progress && GET_CODE (operand0) == REG
1671 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1672 operand0 = reg_equiv_mem (REGNO (operand0));
1673 else if (scratch_reg
1674 && reload_in_progress && GET_CODE (operand0) == SUBREG
1675 && GET_CODE (SUBREG_REG (operand0)) == REG
1676 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1678 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1679 the code which tracks sets/uses for delete_output_reload. */
1680 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1681 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1682 SUBREG_BYTE (operand0));
1683 operand0 = alter_subreg (&temp, true);
1686 if (scratch_reg
1687 && reload_in_progress && GET_CODE (operand1) == REG
1688 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1689 operand1 = reg_equiv_mem (REGNO (operand1));
1690 else if (scratch_reg
1691 && reload_in_progress && GET_CODE (operand1) == SUBREG
1692 && GET_CODE (SUBREG_REG (operand1)) == REG
1693 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1695 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1696 the code which tracks sets/uses for delete_output_reload. */
1697 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1698 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1699 SUBREG_BYTE (operand1));
1700 operand1 = alter_subreg (&temp, true);
1703 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1704 && ((tem = find_replacement (&XEXP (operand0, 0)))
1705 != XEXP (operand0, 0)))
1706 operand0 = replace_equiv_address (operand0, tem);
1708 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1709 && ((tem = find_replacement (&XEXP (operand1, 0)))
1710 != XEXP (operand1, 0)))
1711 operand1 = replace_equiv_address (operand1, tem);
1713 /* Handle secondary reloads for loads/stores of FP registers from
1714 REG+D addresses where D does not fit in 5 or 14 bits, including
1715 (subreg (mem (addr))) cases, and reloads for other unsupported
1716 memory operands. */
1717 if (scratch_reg
1718 && FP_REG_P (operand0)
1719 && (MEM_P (operand1)
1720 || (GET_CODE (operand1) == SUBREG
1721 && MEM_P (XEXP (operand1, 0)))))
1723 rtx op1 = operand1;
1725 if (GET_CODE (op1) == SUBREG)
1726 op1 = XEXP (op1, 0);
1728 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1730 if (!(TARGET_PA_20
1731 && !TARGET_ELF32
1732 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1733 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1735 /* SCRATCH_REG will hold an address and maybe the actual data.
1736 We want it in WORD_MODE regardless of what mode it was
1737 originally given to us. */
1738 scratch_reg = force_mode (word_mode, scratch_reg);
1740 /* D might not fit in 14 bits either; for such cases load D
1741 into scratch reg. */
1742 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1744 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1745 emit_move_insn (scratch_reg,
1746 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1747 Pmode,
1748 XEXP (XEXP (op1, 0), 0),
1749 scratch_reg));
1751 else
1752 emit_move_insn (scratch_reg, XEXP (op1, 0));
1753 op1 = replace_equiv_address (op1, scratch_reg);
1756 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1757 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1758 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1760 /* Load memory address into SCRATCH_REG. */
1761 scratch_reg = force_mode (word_mode, scratch_reg);
1762 emit_move_insn (scratch_reg, XEXP (op1, 0));
1763 op1 = replace_equiv_address (op1, scratch_reg);
1765 emit_insn (gen_rtx_SET (operand0, op1));
1766 return 1;
1768 else if (scratch_reg
1769 && FP_REG_P (operand1)
1770 && (MEM_P (operand0)
1771 || (GET_CODE (operand0) == SUBREG
1772 && MEM_P (XEXP (operand0, 0)))))
1774 rtx op0 = operand0;
1776 if (GET_CODE (op0) == SUBREG)
1777 op0 = XEXP (op0, 0);
1779 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1781 if (!(TARGET_PA_20
1782 && !TARGET_ELF32
1783 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1784 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1786 /* SCRATCH_REG will hold an address and maybe the actual data.
1787 We want it in WORD_MODE regardless of what mode it was
1788 originally given to us. */
1789 scratch_reg = force_mode (word_mode, scratch_reg);
1791 /* D might not fit in 14 bits either; for such cases load D
1792 into scratch reg. */
1793 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1795 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1796 emit_move_insn (scratch_reg,
1797 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1798 Pmode,
1799 XEXP (XEXP (op0, 0), 0),
1800 scratch_reg));
1802 else
1803 emit_move_insn (scratch_reg, XEXP (op0, 0));
1804 op0 = replace_equiv_address (op0, scratch_reg);
1807 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1808 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1809 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1811 /* Load memory address into SCRATCH_REG. */
1812 scratch_reg = force_mode (word_mode, scratch_reg);
1813 emit_move_insn (scratch_reg, XEXP (op0, 0));
1814 op0 = replace_equiv_address (op0, scratch_reg);
1816 emit_insn (gen_rtx_SET (op0, operand1));
1817 return 1;
1819 /* Handle secondary reloads for loads of FP registers from constant
1820 expressions by forcing the constant into memory. For the most part,
1821 this is only necessary for SImode and DImode.
1823 Use scratch_reg to hold the address of the memory location. */
1824 else if (scratch_reg
1825 && CONSTANT_P (operand1)
1826 && FP_REG_P (operand0))
1828 rtx const_mem, xoperands[2];
1830 if (operand1 == CONST0_RTX (mode))
1832 emit_insn (gen_rtx_SET (operand0, operand1));
1833 return 1;
1836 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1837 it in WORD_MODE regardless of what mode it was originally given
1838 to us. */
1839 scratch_reg = force_mode (word_mode, scratch_reg);
1841 /* Force the constant into memory and put the address of the
1842 memory location into scratch_reg. */
1843 const_mem = force_const_mem (mode, operand1);
1844 xoperands[0] = scratch_reg;
1845 xoperands[1] = XEXP (const_mem, 0);
1846 pa_emit_move_sequence (xoperands, Pmode, 0);
1848 /* Now load the destination register. */
1849 emit_insn (gen_rtx_SET (operand0,
1850 replace_equiv_address (const_mem, scratch_reg)));
1851 return 1;
1853 /* Handle secondary reloads for SAR. These occur when trying to load
1854 the SAR from memory or a constant. */
1855 else if (scratch_reg
1856 && GET_CODE (operand0) == REG
1857 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1858 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1859 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1861 /* D might not fit in 14 bits either; for such cases load D into
1862 scratch reg. */
1863 if (GET_CODE (operand1) == MEM
1864 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1866 /* We are reloading the address into the scratch register, so we
1867 want to make sure the scratch register is a full register. */
1868 scratch_reg = force_mode (word_mode, scratch_reg);
1870 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1871 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1872 0)),
1873 Pmode,
1874 XEXP (XEXP (operand1, 0),
1875 0),
1876 scratch_reg));
1878 /* Now we are going to load the scratch register from memory;
1879 we want to load it in the same width as the original MEM,
1880 which must be the same as the width of the ultimate destination,
1881 OPERAND0. */
1882 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1884 emit_move_insn (scratch_reg,
1885 replace_equiv_address (operand1, scratch_reg));
1887 else
1889 /* We want to load the scratch register using the same mode as
1890 the ultimate destination. */
1891 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1893 emit_move_insn (scratch_reg, operand1);
1896 /* And emit the insn to set the ultimate destination. We know that
1897 the scratch register has the same mode as the destination at this
1898 point. */
1899 emit_move_insn (operand0, scratch_reg);
1900 return 1;
1903 /* Handle the most common case: storing into a register. */
1904 if (register_operand (operand0, mode))
1906 /* Legitimize TLS symbol references. This happens for references
1907 that aren't legitimate constants. */
1908 if (PA_SYMBOL_REF_TLS_P (operand1))
1909 operand1 = legitimize_tls_address (operand1);
1911 if (register_operand (operand1, mode)
1912 || (GET_CODE (operand1) == CONST_INT
1913 && pa_cint_ok_for_move (UINTVAL (operand1)))
1914 || (operand1 == CONST0_RTX (mode))
1915 || (GET_CODE (operand1) == HIGH
1916 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1917 /* Only `general_operands' can come here, so MEM is ok. */
1918 || GET_CODE (operand1) == MEM)
1920 /* Various sets are created during RTL generation which don't
1921 have the REG_POINTER flag correctly set. After the CSE pass,
1922 instruction recognition can fail if we don't consistently
1923 set this flag when performing register copies. This should
1924 also improve the opportunities for creating insns that use
1925 unscaled indexing. */
1926 if (REG_P (operand0) && REG_P (operand1))
1928 if (REG_POINTER (operand1)
1929 && !REG_POINTER (operand0)
1930 && !HARD_REGISTER_P (operand0))
1931 copy_reg_pointer (operand0, operand1);
1934 /* When MEMs are broken out, the REG_POINTER flag doesn't
1935 get set. In some cases, we can set the REG_POINTER flag
1936 from the declaration for the MEM. */
1937 if (REG_P (operand0)
1938 && GET_CODE (operand1) == MEM
1939 && !REG_POINTER (operand0))
1941 tree decl = MEM_EXPR (operand1);
1943 /* Set the register pointer flag and register alignment
1944 if the declaration for this memory reference is a
1945 pointer type. */
1946 if (decl)
1948 tree type;
1950 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1951 tree operand 1. */
1952 if (TREE_CODE (decl) == COMPONENT_REF)
1953 decl = TREE_OPERAND (decl, 1);
1955 type = TREE_TYPE (decl);
1956 type = strip_array_types (type);
1958 if (POINTER_TYPE_P (type))
1959 mark_reg_pointer (operand0, BITS_PER_UNIT);
1963 emit_insn (gen_rtx_SET (operand0, operand1));
1964 return 1;
1967 else if (GET_CODE (operand0) == MEM)
1969 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1970 && !(reload_in_progress || reload_completed))
1972 rtx temp = gen_reg_rtx (DFmode);
1974 emit_insn (gen_rtx_SET (temp, operand1));
1975 emit_insn (gen_rtx_SET (operand0, temp));
1976 return 1;
1978 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1980 /* Run this case quickly. */
1981 emit_insn (gen_rtx_SET (operand0, operand1));
1982 return 1;
1984 if (! (reload_in_progress || reload_completed))
1986 operands[0] = validize_mem (operand0);
1987 operands[1] = operand1 = force_reg (mode, operand1);
1991 /* Simplify the source if we need to.
1992 Note we do have to handle function labels here, even though we do
1993 not consider them legitimate constants. Loop optimizations can
1994 call one of the emit_move_xxx functions with one as a source. */
1995 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1996 || (GET_CODE (operand1) == HIGH
1997 && symbolic_operand (XEXP (operand1, 0), mode))
1998 || function_label_operand (operand1, VOIDmode)
1999 || tls_referenced_p (operand1))
2001 int ishighonly = 0;
2003 if (GET_CODE (operand1) == HIGH)
2005 ishighonly = 1;
2006 operand1 = XEXP (operand1, 0);
2008 if (symbolic_operand (operand1, mode))
2010 /* Argh. The assembler and linker can't handle arithmetic
2011 involving plabels.
2013 So we force the plabel into memory, load operand0 from
2014 the memory location, then add in the constant part. */
2015 if ((GET_CODE (operand1) == CONST
2016 && GET_CODE (XEXP (operand1, 0)) == PLUS
2017 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
2018 VOIDmode))
2019 || function_label_operand (operand1, VOIDmode))
2021 rtx temp, const_part;
2023 /* Figure out what (if any) scratch register to use. */
2024 if (reload_in_progress || reload_completed)
2026 scratch_reg = scratch_reg ? scratch_reg : operand0;
2027 /* SCRATCH_REG will hold an address and maybe the actual
2028 data. We want it in WORD_MODE regardless of what mode it
2029 was originally given to us. */
2030 scratch_reg = force_mode (word_mode, scratch_reg);
2032 else if (flag_pic)
2033 scratch_reg = gen_reg_rtx (Pmode);
2035 if (GET_CODE (operand1) == CONST)
2037 /* Save away the constant part of the expression. */
2038 const_part = XEXP (XEXP (operand1, 0), 1);
2039 gcc_assert (GET_CODE (const_part) == CONST_INT);
2041 /* Force the function label into memory. */
2042 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2044 else
2046 /* No constant part. */
2047 const_part = NULL_RTX;
2049 /* Force the function label into memory. */
2050 temp = force_const_mem (mode, operand1);
2054 /* Get the address of the memory location. PIC-ify it if
2055 necessary. */
2056 temp = XEXP (temp, 0);
2057 if (flag_pic)
2058 temp = legitimize_pic_address (temp, mode, scratch_reg);
2060 /* Put the address of the memory location into our destination
2061 register. */
2062 operands[1] = temp;
2063 pa_emit_move_sequence (operands, mode, scratch_reg);
2065 /* Now load from the memory location into our destination
2066 register. */
2067 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2068 pa_emit_move_sequence (operands, mode, scratch_reg);
2070 /* And add back in the constant part. */
2071 if (const_part != NULL_RTX)
2072 expand_inc (operand0, const_part);
2074 return 1;
2077 if (flag_pic)
2079 rtx_insn *insn;
2080 rtx temp;
2082 if (reload_in_progress || reload_completed)
2084 temp = scratch_reg ? scratch_reg : operand0;
2085 /* TEMP will hold an address and maybe the actual
2086 data. We want it in WORD_MODE regardless of what mode it
2087 was originally given to us. */
2088 temp = force_mode (word_mode, temp);
2090 else
2091 temp = gen_reg_rtx (Pmode);
2093 /* Force (const (plus (symbol) (const_int))) to memory
2094 if the const_int will not fit in 14 bits. Although
2095 this requires a relocation, the instruction sequence
2096 needed to load the value is shorter. */
2097 if (GET_CODE (operand1) == CONST
2098 && GET_CODE (XEXP (operand1, 0)) == PLUS
2099 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2100 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2102 rtx x, m = force_const_mem (mode, operand1);
2104 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2105 x = replace_equiv_address (m, x);
2106 insn = emit_move_insn (operand0, x);
2108 else
2110 operands[1] = legitimize_pic_address (operand1, mode, temp);
2111 if (REG_P (operand0) && REG_P (operands[1]))
2112 copy_reg_pointer (operand0, operands[1]);
2113 insn = emit_move_insn (operand0, operands[1]);
2116 /* Put a REG_EQUAL note on this insn. */
2117 set_unique_reg_note (insn, REG_EQUAL, operand1);
2119 /* On the HPPA, references to data space are supposed to use dp,
2120 register 27, but showing it in the RTL inhibits various cse
2121 and loop optimizations. */
2122 else
2124 rtx temp, set;
2126 if (reload_in_progress || reload_completed)
2128 temp = scratch_reg ? scratch_reg : operand0;
2129 /* TEMP will hold an address and maybe the actual
2130 data. We want it in WORD_MODE regardless of what mode it
2131 was originally given to us. */
2132 temp = force_mode (word_mode, temp);
2134 else
2135 temp = gen_reg_rtx (mode);
2137 /* Loading a SYMBOL_REF into a register makes that register
2138 safe to be used as the base in an indexed address.
2140 Don't mark hard registers though. That loses. */
2141 if (GET_CODE (operand0) == REG
2142 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2143 mark_reg_pointer (operand0, BITS_PER_UNIT);
2144 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2145 mark_reg_pointer (temp, BITS_PER_UNIT);
2147 if (ishighonly)
2148 set = gen_rtx_SET (operand0, temp);
2149 else
2150 set = gen_rtx_SET (operand0,
2151 gen_rtx_LO_SUM (mode, temp, operand1));
2153 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2154 emit_insn (set);
2157 return 1;
2159 else if (tls_referenced_p (operand1))
2161 rtx tmp = operand1;
2162 rtx addend = NULL;
2164 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2166 addend = XEXP (XEXP (tmp, 0), 1);
2167 tmp = XEXP (XEXP (tmp, 0), 0);
2170 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2171 tmp = legitimize_tls_address (tmp);
2172 if (addend)
2174 tmp = gen_rtx_PLUS (mode, tmp, addend);
2175 tmp = force_operand (tmp, operands[0]);
2177 operands[1] = tmp;
2179 else if (GET_CODE (operand1) != CONST_INT
2180 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2182 rtx temp;
2183 rtx_insn *insn;
2184 rtx op1 = operand1;
2185 HOST_WIDE_INT value = 0;
2186 HOST_WIDE_INT insv = 0;
2187 int insert = 0;
2189 if (GET_CODE (operand1) == CONST_INT)
2190 value = INTVAL (operand1);
2192 if (TARGET_64BIT
2193 && GET_CODE (operand1) == CONST_INT
2194 && HOST_BITS_PER_WIDE_INT > 32
2195 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2197 HOST_WIDE_INT nval;
2199 /* Extract the low order 32 bits of the value and sign extend.
2200 If the new value is the same as the original value, we can
2201 use the original value as-is. If the new value is
2202 different, we use it and insert the most significant 32 bits
2203 of the original value into the final result. */
2204 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2205 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2206 if (value != nval)
2208 #if HOST_BITS_PER_WIDE_INT > 32
2209 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2210 #endif
2211 insert = 1;
2212 value = nval;
2213 operand1 = GEN_INT (nval);
2217 if (reload_in_progress || reload_completed)
2218 temp = scratch_reg ? scratch_reg : operand0;
2219 else
2220 temp = gen_reg_rtx (mode);
2222 /* We don't directly split DImode constants on 32-bit targets
2223 because PLUS uses an 11-bit immediate and the insn sequence
2224 generated is not as efficient as the one using HIGH/LO_SUM. */
2225 if (GET_CODE (operand1) == CONST_INT
2226 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2227 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2228 && !insert)
2230 /* Directly break constant into high and low parts. This
2231 provides better optimization opportunities because various
2232 passes recognize constants split with PLUS but not LO_SUM.
2233 We use a 14-bit signed low part except when the addition
2234 of 0x4000 to the high part might change the sign of the
2235 high part. */
2236 HOST_WIDE_INT low = value & 0x3fff;
2237 HOST_WIDE_INT high = value & ~ 0x3fff;
2239 if (low >= 0x2000)
2241 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2242 high += 0x2000;
2243 else
2244 high += 0x4000;
2247 low = value - high;
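/* Illustrative arithmetic, assuming SImode: VALUE = 0x23456 gives
   LOW = 0x3456 >= 0x2000, so HIGH is bumped from 0x20000 to 0x24000
   and LOW becomes 0x23456 - 0x24000 = -0xbaa, a valid 14-bit signed
   displacement for the PLUS built below.  */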
2249 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2250 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2252 else
2254 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2255 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2258 insn = emit_move_insn (operands[0], operands[1]);
2260 /* Now insert the most significant 32 bits of the value
2261 into the register. When we don't have a second register
2262 available, it could take up to nine instructions to load
2263 a 64-bit integer constant. Prior to reload, we force
2264 constants that would take more than three instructions
2265 to load to the constant pool. During and after reload,
2266 we have to handle all possible values. */
2267 if (insert)
2269 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2270 register and the value to be inserted is outside the
2271 range that can be loaded with three depdi instructions. */
2272 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2274 operand1 = GEN_INT (insv);
2276 emit_insn (gen_rtx_SET (temp,
2277 gen_rtx_HIGH (mode, operand1)));
2278 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2279 if (mode == DImode)
2280 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2281 const0_rtx, temp));
2282 else
2283 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2284 const0_rtx, temp));
2286 else
2288 int len = 5, pos = 27;
2290 /* Insert the bits using the depdi instruction. */
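/* The loop below consumes the 32 bits to be inserted in signed
   5-bit chunks, from bit position 27 down to 0; any run of
   following bits equal to a chunk's sign bit is folded into that
   chunk by widening the insertion, so sign-extension runs cost no
   extra insns.  */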
2291 while (pos >= 0)
2293 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2294 HOST_WIDE_INT sign = v5 < 0;
2296 /* Left extend the insertion. */
2297 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2298 while (pos > 0 && (insv & 1) == sign)
2300 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2301 len += 1;
2302 pos -= 1;
2305 if (mode == DImode)
2306 insn = emit_insn (gen_insvdi (operand0,
2307 GEN_INT (len),
2308 GEN_INT (pos),
2309 GEN_INT (v5)));
2310 else
2311 insn = emit_insn (gen_insvsi (operand0,
2312 GEN_INT (len),
2313 GEN_INT (pos),
2314 GEN_INT (v5)));
2316 len = pos > 0 && pos < 5 ? pos : 5;
2317 pos -= len;
2322 set_unique_reg_note (insn, REG_EQUAL, op1);
2324 return 1;
2327 /* Now have insn-emit do whatever it normally does. */
2328 return 0;
2331 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2332 it will need a link/runtime reloc). */
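/* For example, a static initializer such as "&x + 4" arrives here
   as a POINTER_PLUS_EXPR wrapping an ADDR_EXPR, so the recursion
   below returns 1.  */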
2334 int
2335 pa_reloc_needed (tree exp)
2337 int reloc = 0;
2339 switch (TREE_CODE (exp))
2341 case ADDR_EXPR:
2342 return 1;
2344 case POINTER_PLUS_EXPR:
2345 case PLUS_EXPR:
2346 case MINUS_EXPR:
2347 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2348 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2349 break;
2351 CASE_CONVERT:
2352 case NON_LVALUE_EXPR:
2353 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2354 break;
2356 case CONSTRUCTOR:
2358 tree value;
2359 unsigned HOST_WIDE_INT ix;
2361 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2362 if (value)
2363 reloc |= pa_reloc_needed (value);
2365 break;
2367 case ERROR_MARK:
2368 break;
2370 default:
2371 break;
2373 return reloc;
2377 /* Return the best assembler insn template
2378 for moving operands[1] into operands[0] as a fullword. */
2379 const char *
2380 pa_singlemove_string (rtx *operands)
2382 HOST_WIDE_INT intval;
2384 if (GET_CODE (operands[0]) == MEM)
2385 return "stw %r1,%0";
2386 if (GET_CODE (operands[1]) == MEM)
2387 return "ldw %1,%0";
2388 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2390 long i;
2392 gcc_assert (GET_MODE (operands[1]) == SFmode);
2394 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2395 bit pattern. */
2396 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2398 operands[1] = GEN_INT (i);
2399 /* Fall through to CONST_INT case. */
2401 if (GET_CODE (operands[1]) == CONST_INT)
2403 intval = INTVAL (operands[1]);
2405 if (VAL_14_BITS_P (intval))
2406 return "ldi %1,%0";
2407 else if ((intval & 0x7ff) == 0)
2408 return "ldil L'%1,%0";
2409 else if (pa_zdepi_cint_p (intval))
2410 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2411 else
2412 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2414 return "copy %1,%0";
2418 /* Compute position (in OP[1]) and width (in OP[2])
2419 useful for copying IMM to a register using the zdepi
2420 instructions. Store the immediate value to insert in OP[0]. */
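/* Worked example: IMM = 0x1e0 has its least significant set bit at
   position 5 and a 4-bit wide field of 0xf, so OP becomes
   { 15, 26, 4 } and the constant is built with (roughly)
   "zdepi 15,26,4,%reg", i.e. 0xf << 5 == 0x1e0.  */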
2421 static void
2422 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2424 int lsb, len;
2426 /* Find the least significant set bit in IMM. */
2427 for (lsb = 0; lsb < 32; lsb++)
2429 if ((imm & 1) != 0)
2430 break;
2431 imm >>= 1;
2434 /* Choose variants based on *sign* of the 5-bit field. */
2435 if ((imm & 0x10) == 0)
2436 len = (lsb <= 28) ? 4 : 32 - lsb;
2437 else
2439 /* Find the width of the bitstring in IMM. */
2440 for (len = 5; len < 32 - lsb; len++)
2442 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2443 break;
2446 /* Sign extend IMM as a 5-bit value. */
2447 imm = (imm & 0xf) - 0x10;
2450 op[0] = imm;
2451 op[1] = 31 - lsb;
2452 op[2] = len;
2455 /* Compute position (in OP[1]) and width (in OP[2])
2456 useful for copying IMM to a register using the depdi,z
2457 instructions. Store the immediate value to insert in OP[0]. */
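/* The 64-bit analogue of the example above: IMM = 0x1e0 again
   yields OP[0] = 15 and OP[2] = 4, but OP[1] = 63 - 5 = 58 for
   use with depdi,z.  */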
2459 static void
2460 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2462 int lsb, len, maxlen;
2464 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2466 /* Find the least significant set bit in IMM. */
2467 for (lsb = 0; lsb < maxlen; lsb++)
2469 if ((imm & 1) != 0)
2470 break;
2471 imm >>= 1;
2474 /* Choose variants based on *sign* of the 5-bit field. */
2475 if ((imm & 0x10) == 0)
2476 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2477 else
2479 /* Find the width of the bitstring in IMM. */
2480 for (len = 5; len < maxlen - lsb; len++)
2482 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2483 break;
2486 /* Extend length if host is narrow and IMM is negative. */
2487 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2488 len += 32;
2490 /* Sign extend IMM as a 5-bit value. */
2491 imm = (imm & 0xf) - 0x10;
2494 op[0] = imm;
2495 op[1] = 63 - lsb;
2496 op[2] = len;
2499 /* Output assembler code to perform a doubleword move insn
2500 with operands OPERANDS. */
2502 const char *
2503 pa_output_move_double (rtx *operands)
2505 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2506 rtx latehalf[2];
2507 rtx addreg0 = 0, addreg1 = 0;
2508 int highonly = 0;
2510 /* First classify both operands. */
2512 if (REG_P (operands[0]))
2513 optype0 = REGOP;
2514 else if (offsettable_memref_p (operands[0]))
2515 optype0 = OFFSOP;
2516 else if (GET_CODE (operands[0]) == MEM)
2517 optype0 = MEMOP;
2518 else
2519 optype0 = RNDOP;
2521 if (REG_P (operands[1]))
2522 optype1 = REGOP;
2523 else if (CONSTANT_P (operands[1]))
2524 optype1 = CNSTOP;
2525 else if (offsettable_memref_p (operands[1]))
2526 optype1 = OFFSOP;
2527 else if (GET_CODE (operands[1]) == MEM)
2528 optype1 = MEMOP;
2529 else
2530 optype1 = RNDOP;
2532 /* Check for the cases that the operand constraints are not
2533 supposed to allow. */
2534 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2536 /* Handle copies between general and floating registers. */
2538 if (optype0 == REGOP && optype1 == REGOP
2539 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2541 if (FP_REG_P (operands[0]))
2543 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2544 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2545 return "{fldds|fldd} -16(%%sp),%0";
2547 else
2549 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2550 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2551 return "{ldws|ldw} -12(%%sp),%R0";
2555 /* Handle auto decrementing and incrementing loads and stores
2556 specifically, since the structure of the function doesn't work
2557 for them without major modification. Do it better when we teach
2558 this port about the general inc/dec addressing of PA.
2559 (This was written by tege. Chide him if it doesn't work.) */
2561 if (optype0 == MEMOP)
2563 /* We have to output the address syntax ourselves, since print_operand
2564 doesn't deal with the addresses we want to use. Fix this later. */
2566 rtx addr = XEXP (operands[0], 0);
2567 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2569 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2571 operands[0] = XEXP (addr, 0);
2572 gcc_assert (GET_CODE (operands[1]) == REG
2573 && GET_CODE (operands[0]) == REG);
2575 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2577 /* No overlap between high target register and address
2578 register. (We do this in a non-obvious way to
2579 save a register file writeback) */
2580 if (GET_CODE (addr) == POST_INC)
2581 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2582 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2584 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2586 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2588 operands[0] = XEXP (addr, 0);
2589 gcc_assert (GET_CODE (operands[1]) == REG
2590 && GET_CODE (operands[0]) == REG);
2592 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2593 /* No overlap between high target register and address
2594 register. (We do this in a non-obvious way to save a
2595 register file writeback) */
2596 if (GET_CODE (addr) == PRE_INC)
2597 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2598 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2601 if (optype1 == MEMOP)
2603 /* We have to output the address syntax ourselves, since print_operand
2604 doesn't deal with the addresses we want to use. Fix this later. */
2606 rtx addr = XEXP (operands[1], 0);
2607 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2609 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2611 operands[1] = XEXP (addr, 0);
2612 gcc_assert (GET_CODE (operands[0]) == REG
2613 && GET_CODE (operands[1]) == REG);
2615 if (!reg_overlap_mentioned_p (high_reg, addr))
2617 /* No overlap between high target register and address
2618 register. (We do this in a non-obvious way to
2619 save a register file writeback) */
2620 if (GET_CODE (addr) == POST_INC)
2621 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2622 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2624 else
2626 /* This is an undefined situation. We should load into the
2627 address register *and* update that register. Probably
2628 we don't need to handle this at all. */
2629 if (GET_CODE (addr) == POST_INC)
2630 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2631 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2634 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2636 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2638 operands[1] = XEXP (addr, 0);
2639 gcc_assert (GET_CODE (operands[0]) == REG
2640 && GET_CODE (operands[1]) == REG);
2642 if (!reg_overlap_mentioned_p (high_reg, addr))
2644 /* No overlap between high target register and address
2645 register. (We do this in a non-obvious way to
2646 save a register file writeback) */
2647 if (GET_CODE (addr) == PRE_INC)
2648 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2649 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2651 else
2653 /* This is an undefined situation. We should load into the
2654 address register *and* update that register. Probably
2655 we don't need to handle this at all. */
2656 if (GET_CODE (addr) == PRE_INC)
2657 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2658 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2661 else if (GET_CODE (addr) == PLUS
2662 && GET_CODE (XEXP (addr, 0)) == MULT)
2664 rtx xoperands[4];
2666 /* Load address into left half of destination register. */
2667 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2668 xoperands[1] = XEXP (addr, 1);
2669 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2670 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2671 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2672 xoperands);
2673 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2675 else if (GET_CODE (addr) == PLUS
2676 && REG_P (XEXP (addr, 0))
2677 && REG_P (XEXP (addr, 1)))
2679 rtx xoperands[3];
2681 /* Load address into left half of destination register. */
2682 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2683 xoperands[1] = XEXP (addr, 0);
2684 xoperands[2] = XEXP (addr, 1);
2685 output_asm_insn ("{addl|add,l} %1,%2,%0",
2686 xoperands);
2687 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2691 /* If an operand is an unoffsettable memory ref, find a register
2692 we can increment temporarily to make it refer to the second word. */
2694 if (optype0 == MEMOP)
2695 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2697 if (optype1 == MEMOP)
2698 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2700 /* Ok, we can do one word at a time.
2701 Normally we do the low-numbered word first.
2703 In either case, set up in LATEHALF the operands to use
2704 for the high-numbered word and in some cases alter the
2705 operands in OPERANDS to be suitable for the low-numbered word. */
2707 if (optype0 == REGOP)
2708 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2709 else if (optype0 == OFFSOP)
2710 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2711 else
2712 latehalf[0] = operands[0];
2714 if (optype1 == REGOP)
2715 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2716 else if (optype1 == OFFSOP)
2717 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2718 else if (optype1 == CNSTOP)
2720 if (GET_CODE (operands[1]) == HIGH)
2722 operands[1] = XEXP (operands[1], 0);
2723 highonly = 1;
2725 split_double (operands[1], &operands[1], &latehalf[1]);
2727 else
2728 latehalf[1] = operands[1];
2730 /* If the first move would clobber the source of the second one,
2731 do them in the other order.
2733 This can happen in two cases:
2735 mem -> register where the first half of the destination register
2736 is the same register used in the memory's address. Reload
2737 can create such insns.
2739 mem in this case will be either register indirect or register
2740 indirect plus a valid offset.
2742 register -> register move where REGNO(dst) == REGNO(src + 1)
2743 someone (Tim/Tege?) claimed this can happen for parameter loads.
2745 Handle mem -> register case first. */
2746 if (optype0 == REGOP
2747 && (optype1 == MEMOP || optype1 == OFFSOP)
2748 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2750 /* Do the late half first. */
2751 if (addreg1)
2752 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2753 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2755 /* Then clobber. */
2756 if (addreg1)
2757 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2758 return pa_singlemove_string (operands);
2761 /* Now handle register -> register case. */
2762 if (optype0 == REGOP && optype1 == REGOP
2763 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2765 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2766 return pa_singlemove_string (operands);
2769 /* Normal case: do the two words, low-numbered first. */
2771 output_asm_insn (pa_singlemove_string (operands), operands);
2773 /* Make any unoffsettable addresses point at high-numbered word. */
2774 if (addreg0)
2775 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2776 if (addreg1)
2777 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2779 /* Do high-numbered word. */
2780 if (highonly)
2781 output_asm_insn ("ldil L'%1,%0", latehalf);
2782 else
2783 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2785 /* Undo the adds we just did. */
2786 if (addreg0)
2787 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2788 if (addreg1)
2789 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2791 return "";
2794 const char *
2795 pa_output_fp_move_double (rtx *operands)
2797 if (FP_REG_P (operands[0]))
2799 if (FP_REG_P (operands[1])
2800 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2801 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2802 else
2803 output_asm_insn ("fldd%F1 %1,%0", operands);
2805 else if (FP_REG_P (operands[1]))
2807 output_asm_insn ("fstd%F0 %1,%0", operands);
2809 else
2811 rtx xoperands[2];
2813 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2815 /* This is a pain. You have to be prepared to deal with an
2816 arbitrary address here including pre/post increment/decrement.
2818 So avoid this in the MD. */
2819 gcc_assert (GET_CODE (operands[0]) == REG);
2821 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2822 xoperands[0] = operands[0];
2823 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2825 return "";
2828 /* Return a REG that occurs in ADDR with coefficient 1.
2829 ADDR can be effectively incremented by incrementing REG. */
2831 static rtx
2832 find_addr_reg (rtx addr)
2834 while (GET_CODE (addr) == PLUS)
2836 if (GET_CODE (XEXP (addr, 0)) == REG)
2837 addr = XEXP (addr, 0);
2838 else if (GET_CODE (XEXP (addr, 1)) == REG)
2839 addr = XEXP (addr, 1);
2840 else if (CONSTANT_P (XEXP (addr, 0)))
2841 addr = XEXP (addr, 1);
2842 else if (CONSTANT_P (XEXP (addr, 1)))
2843 addr = XEXP (addr, 0);
2844 else
2845 gcc_unreachable ();
2847 gcc_assert (GET_CODE (addr) == REG);
2848 return addr;
2851 /* Emit code to perform a block move.
2853 OPERANDS[0] is the destination pointer as a REG, clobbered.
2854 OPERANDS[1] is the source pointer as a REG, clobbered.
2855 OPERANDS[2] is a register for temporary storage.
2856 OPERANDS[3] is a register for temporary storage.
2857 OPERANDS[4] is the size as a CONST_INT
2858 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2859 OPERANDS[6] is another temporary register. */
2861 const char *
2862 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2864 int align = INTVAL (operands[5]);
2865 unsigned long n_bytes = INTVAL (operands[4]);
2867 /* We can't move more than a word at a time because the PA
2868 has no integer move insns longer than a word. (Could use fp mem ops?) */
2869 if (align > (TARGET_64BIT ? 8 : 4))
2870 align = (TARGET_64BIT ? 8 : 4);
2872 /* Note that we know each loop below will execute at least twice
2873 (else we would have open-coded the copy). */
2874 switch (align)
2876 case 8:
2877 /* Pre-adjust the loop counter. */
2878 operands[4] = GEN_INT (n_bytes - 16);
2879 output_asm_insn ("ldi %4,%2", operands);
2881 /* Copying loop. */
2882 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2883 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2884 output_asm_insn ("std,ma %3,8(%0)", operands);
2885 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2886 output_asm_insn ("std,ma %6,8(%0)", operands);
2888 /* Handle the residual. There could be up to 15 bytes of
2889 residual to copy! */
2890 if (n_bytes % 16 != 0)
2892 operands[4] = GEN_INT (n_bytes % 8);
2893 if (n_bytes % 16 >= 8)
2894 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2895 if (n_bytes % 8 != 0)
2896 output_asm_insn ("ldd 0(%1),%6", operands);
2897 if (n_bytes % 16 >= 8)
2898 output_asm_insn ("std,ma %3,8(%0)", operands);
2899 if (n_bytes % 8 != 0)
2900 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2902 return "";
2904 case 4:
2905 /* Pre-adjust the loop counter. */
2906 operands[4] = GEN_INT (n_bytes - 8);
2907 output_asm_insn ("ldi %4,%2", operands);
2909 /* Copying loop. */
2910 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2911 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2912 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2913 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2914 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2916 /* Handle the residual. There could be up to 7 bytes of
2917 residual to copy! */
2918 if (n_bytes % 8 != 0)
2920 operands[4] = GEN_INT (n_bytes % 4);
2921 if (n_bytes % 8 >= 4)
2922 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2923 if (n_bytes % 4 != 0)
2924 output_asm_insn ("ldw 0(%1),%6", operands);
2925 if (n_bytes % 8 >= 4)
2926 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2927 if (n_bytes % 4 != 0)
2928 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2930 return "";
2932 case 2:
2933 /* Pre-adjust the loop counter. */
2934 operands[4] = GEN_INT (n_bytes - 4);
2935 output_asm_insn ("ldi %4,%2", operands);
2937 /* Copying loop. */
2938 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2939 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2940 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2941 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2942 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2944 /* Handle the residual. */
2945 if (n_bytes % 4 != 0)
2947 if (n_bytes % 4 >= 2)
2948 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2949 if (n_bytes % 2 != 0)
2950 output_asm_insn ("ldb 0(%1),%6", operands);
2951 if (n_bytes % 4 >= 2)
2952 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2953 if (n_bytes % 2 != 0)
2954 output_asm_insn ("stb %6,0(%0)", operands);
2956 return "";
2958 case 1:
2959 /* Pre-adjust the loop counter. */
2960 operands[4] = GEN_INT (n_bytes - 2);
2961 output_asm_insn ("ldi %4,%2", operands);
2963 /* Copying loop. */
2964 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2965 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2966 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2967 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2968 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2970 /* Handle the residual. */
2971 if (n_bytes % 2 != 0)
2973 output_asm_insn ("ldb 0(%1),%3", operands);
2974 output_asm_insn ("stb %3,0(%0)", operands);
2976 return "";
2978 default:
2979 gcc_unreachable ();
2983 /* Count the number of insns necessary to handle this block move.
2985 Basic structure is the same as emit_block_move, except that we
2986 count insns rather than emit them. */
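/* Sanity example: copying 19 bytes at 4-byte alignment counts the
   6-insn loop plus 2 insns for the 3-byte tail (19 % 8 == 3, less
   than the alignment, and 19 % 4 != 0), i.e. 8 insns = 32 bytes.  */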
2988 static int
2989 compute_movmem_length (rtx_insn *insn)
2991 rtx pat = PATTERN (insn);
2992 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2993 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2994 unsigned int n_insns = 0;
2996 /* We can't move more than a word at a time because the PA
2997 has no integer move insns longer than a word. (Could use fp mem ops?) */
2998 if (align > (TARGET_64BIT ? 8 : 4))
2999 align = (TARGET_64BIT ? 8 : 4);
3001 /* The basic copying loop. */
3002 n_insns = 6;
3004 /* Residuals. */
3005 if (n_bytes % (2 * align) != 0)
3007 if ((n_bytes % (2 * align)) >= align)
3008 n_insns += 2;
3010 if ((n_bytes % align) != 0)
3011 n_insns += 2;
3014 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3015 return n_insns * 4;
3018 /* Emit code to perform a block clear.
3020 OPERANDS[0] is the destination pointer as a REG, clobbered.
3021 OPERANDS[1] is a register for temporary storage.
3022 OPERANDS[2] is the size as a CONST_INT
3023 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3025 const char *
3026 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3028 int align = INTVAL (operands[3]);
3029 unsigned long n_bytes = INTVAL (operands[2]);
3031 /* We can't clear more than a word at a time because the PA
3032 has no integer move insns longer than a word. */
3033 if (align > (TARGET_64BIT ? 8 : 4))
3034 align = (TARGET_64BIT ? 8 : 4);
3036 /* Note that we know each loop below will execute at least twice
3037 (else we would have open-coded the clear). */
3038 switch (align)
3040 case 8:
3041 /* Pre-adjust the loop counter. */
3042 operands[2] = GEN_INT (n_bytes - 16);
3043 output_asm_insn ("ldi %2,%1", operands);
3045 /* Loop. */
3046 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3047 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3048 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3050 /* Handle the residual. There could be up to 15 bytes of
3051 residual to copy! */
3052 if (n_bytes % 16 != 0)
3054 operands[2] = GEN_INT (n_bytes % 8);
3055 if (n_bytes % 16 >= 8)
3056 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3057 if (n_bytes % 8 != 0)
3058 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3060 return "";
3062 case 4:
3063 /* Pre-adjust the loop counter. */
3064 operands[2] = GEN_INT (n_bytes - 8);
3065 output_asm_insn ("ldi %2,%1", operands);
3067 /* Loop. */
3068 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3069 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3070 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3072 /* Handle the residual. There could be up to 7 bytes of
3073 residual to copy! */
3074 if (n_bytes % 8 != 0)
3076 operands[2] = GEN_INT (n_bytes % 4);
3077 if (n_bytes % 8 >= 4)
3078 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3079 if (n_bytes % 4 != 0)
3080 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3082 return "";
3084 case 2:
3085 /* Pre-adjust the loop counter. */
3086 operands[2] = GEN_INT (n_bytes - 4);
3087 output_asm_insn ("ldi %2,%1", operands);
3089 /* Loop. */
3090 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3091 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3092 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3094 /* Handle the residual. */
3095 if (n_bytes % 4 != 0)
3097 if (n_bytes % 4 >= 2)
3098 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3099 if (n_bytes % 2 != 0)
3100 output_asm_insn ("stb %%r0,0(%0)", operands);
3102 return "";
3104 case 1:
3105 /* Pre-adjust the loop counter. */
3106 operands[2] = GEN_INT (n_bytes - 2);
3107 output_asm_insn ("ldi %2,%1", operands);
3109 /* Loop. */
3110 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3111 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3112 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3114 /* Handle the residual. */
3115 if (n_bytes % 2 != 0)
3116 output_asm_insn ("stb %%r0,0(%0)", operands);
3118 return "";
3120 default:
3121 gcc_unreachable ();
3125 /* Count the number of insns necessary to handle this block clear.
3127 Basic structure is the same as emit_block_move, except that we
3128 count insns rather than emit them. */
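/* Sanity example: clearing 19 bytes at 4-byte alignment counts the
   4-insn loop plus 1 insn for the 3-byte tail (19 % 8 == 3, less
   than the alignment, and 19 % 4 != 0), i.e. 5 insns = 20 bytes.  */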
3130 static int
3131 compute_clrmem_length (rtx_insn *insn)
3133 rtx pat = PATTERN (insn);
3134 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3135 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3136 unsigned int n_insns = 0;
3138 /* We can't clear more than a word at a time because the PA
3139 has no integer move insns longer than a word. */
3140 if (align > (TARGET_64BIT ? 8 : 4))
3141 align = (TARGET_64BIT ? 8 : 4);
3143 /* The basic loop. */
3144 n_insns = 4;
3146 /* Residuals. */
3147 if (n_bytes % (2 * align) != 0)
3149 if ((n_bytes % (2 * align)) >= align)
3150 n_insns++;
3152 if ((n_bytes % align) != 0)
3153 n_insns++;
3156 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3157 return n_insns * 4;
3161 const char *
3162 pa_output_and (rtx *operands)
3164 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3166 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3167 int ls0, ls1, ms0, p, len;
3169 for (ls0 = 0; ls0 < 32; ls0++)
3170 if ((mask & (1 << ls0)) == 0)
3171 break;
3173 for (ls1 = ls0; ls1 < 32; ls1++)
3174 if ((mask & (1 << ls1)) != 0)
3175 break;
3177 for (ms0 = ls1; ms0 < 32; ms0++)
3178 if ((mask & (1 << ms0)) == 0)
3179 break;
3181 gcc_assert (ms0 == 32);
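/* For example, MASK = 0xff has ls1 == 32 and extracts the low byte
   with extru, while MASK = 0xffff00ff yields
   "{depi|depwi} 0,23,8,%0", clearing bits 8..15 in place.  */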
3183 if (ls1 == 32)
3185 len = ls0;
3187 gcc_assert (len);
3189 operands[2] = GEN_INT (len);
3190 return "{extru|extrw,u} %1,31,%2,%0";
3192 else
3194 /* We could use this `depi' for the case above as well, but `depi'
3195 requires one more register file access than an `extru'. */
3197 p = 31 - ls0;
3198 len = ls1 - ls0;
3200 operands[2] = GEN_INT (p);
3201 operands[3] = GEN_INT (len);
3202 return "{depi|depwi} 0,%2,%3,%0";
3205 else
3206 return "and %1,%2,%0";
3209 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3210 storing the result in operands[0]. */
3211 const char *
3212 pa_output_64bit_and (rtx *operands)
3214 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3216 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3217 int ls0, ls1, ms0, p, len;
3219 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3220 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3221 break;
3223 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3224 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3225 break;
3227 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3228 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3229 break;
3231 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3233 if (ls1 == HOST_BITS_PER_WIDE_INT)
3235 len = ls0;
3237 gcc_assert (len);
3239 operands[2] = GEN_INT (len);
3240 return "extrd,u %1,63,%2,%0";
3242 else
3244 /* We could use this `depdi' for the case above as well, but `depdi'
3245 requires one more register file access than an `extrd,u'. */
3247 p = 63 - ls0;
3248 len = ls1 - ls0;
3250 operands[2] = GEN_INT (p);
3251 operands[3] = GEN_INT (len);
3252 return "depdi 0,%2,%3,%0";
3255 else
3256 return "and %1,%2,%0";
3259 const char *
3260 pa_output_ior (rtx *operands)
3262 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3263 int bs0, bs1, p, len;
3265 if (INTVAL (operands[2]) == 0)
3266 return "copy %1,%0";
3268 for (bs0 = 0; bs0 < 32; bs0++)
3269 if ((mask & (1 << bs0)) != 0)
3270 break;
3272 for (bs1 = bs0; bs1 < 32; bs1++)
3273 if ((mask & (1 << bs1)) == 0)
3274 break;
3276 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
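/* For example, MASK = 0x0000ff00 gives bs0 = 8 and bs1 = 16, so
   this returns "{depi|depwi} -1,23,8,%0", setting bits 8..15.  */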
3278 p = 31 - bs0;
3279 len = bs1 - bs0;
3281 operands[2] = GEN_INT (p);
3282 operands[3] = GEN_INT (len);
3283 return "{depi|depwi} -1,%2,%3,%0";
3286 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3287 storing the result in operands[0]. */
3288 const char *
3289 pa_output_64bit_ior (rtx *operands)
3291 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3292 int bs0, bs1, p, len;
3294 if (INTVAL (operands[2]) == 0)
3295 return "copy %1,%0";
3297 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3298 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3299 break;
3301 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3302 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3303 break;
3305 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3306 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3308 p = 63 - bs0;
3309 len = bs1 - bs0;
3311 operands[2] = GEN_INT (p);
3312 operands[3] = GEN_INT (len);
3313 return "depdi -1,%2,%3,%0";
3316 /* Target hook for assembling integer objects. This code handles
3317 aligned SI and DI integers specially since function references
3318 must be preceded by P%. */
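/* For example, an aligned word-sized reference to function foo is
   emitted as "\t.word\tP%foo" (".dword" for a 64-bit word); the P%
   prefix makes the linker create a plabel/OPD, and is omitted when
   TARGET_FAST_INDIRECT_CALLS is set.  */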
3320 static bool
3321 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3323 bool result;
3324 tree decl = NULL;
3326 /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to
3327 call assemble_external and set the SYMBOL_REF_DECL to NULL before
3328 calling output_addr_const. Otherwise, it may call assemble_external
3329 in the midst of outputting the assembler code for the SYMBOL_REF.
3330 We restore the SYMBOL_REF_DECL after the output is done. */
3331 if (GET_CODE (x) == SYMBOL_REF)
3333 decl = SYMBOL_REF_DECL (x);
3334 if (decl)
3336 assemble_external (decl);
3337 SET_SYMBOL_REF_DECL (x, NULL);
3341 if (size == UNITS_PER_WORD
3342 && aligned_p
3343 && function_label_operand (x, VOIDmode))
3345 fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);
3347 /* We don't want an OPD when generating fast indirect calls. */
3348 if (!TARGET_FAST_INDIRECT_CALLS)
3349 fputs ("P%", asm_out_file);
3351 output_addr_const (asm_out_file, x);
3352 fputc ('\n', asm_out_file);
3353 result = true;
3355 else
3356 result = default_assemble_integer (x, size, aligned_p);
3358 if (decl)
3359 SET_SYMBOL_REF_DECL (x, decl);
3361 return result;
3364 /* Output an ASCII string. */
3365 void
3366 pa_output_ascii (FILE *file, const char *p, int size)
3368 int i;
3369 int chars_output;
3370 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3372 /* The HP assembler can only take strings of 256 characters at one
3373 time. This is a limitation on input line length, *not* the
3374 length of the string. Sigh. Even worse, it seems that the
3375 restriction is in number of input characters (see \xnn &
3376 \whatever). So we have to do this very carefully. */
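/* For example, a newline byte is emitted as the four characters
   "\x0a" via the hex path below, while printable characters other
   than '"' and '\\' pass through unchanged.  */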
3378 fputs ("\t.STRING \"", file);
3380 chars_output = 0;
3381 for (i = 0; i < size; i += 4)
3383 int co = 0;
3384 int io = 0;
3385 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3387 register unsigned int c = (unsigned char) p[i + io];
3389 if (c == '\"' || c == '\\')
3390 partial_output[co++] = '\\';
3391 if (c >= ' ' && c < 0177)
3392 partial_output[co++] = c;
3393 else
3395 unsigned int hexd;
3396 partial_output[co++] = '\\';
3397 partial_output[co++] = 'x';
3398 hexd = c / 16 - 0 + '0';
3399 if (hexd > '9')
3400 hexd -= '9' - 'a' + 1;
3401 partial_output[co++] = hexd;
3402 hexd = c % 16 - 0 + '0';
3403 if (hexd > '9')
3404 hexd -= '9' - 'a' + 1;
3405 partial_output[co++] = hexd;
3408 if (chars_output + co > 243)
3410 fputs ("\"\n\t.STRING \"", file);
3411 chars_output = 0;
3413 fwrite (partial_output, 1, (size_t) co, file);
3414 chars_output += co;
3415 co = 0;
3417 fputs ("\"\n", file);
3420 /* Try to rewrite floating point comparisons & branches to avoid
3421 useless add,tr insns.
3423 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3424 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3425 first attempt to remove useless add,tr insns. It is zero
3426 for the second pass as reorg sometimes leaves bogus REG_DEAD
3427 notes lying around.
3429 When CHECK_NOTES is zero we can only eliminate add,tr insns
3430 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3431 instructions. */
3432 static void
3433 remove_useless_addtr_insns (int check_notes)
3435 rtx_insn *insn;
3436 static int pass = 0;
3438 /* This is fairly cheap, so always run it when optimizing. */
3439 if (optimize > 0)
3441 int fcmp_count = 0;
3442 int fbranch_count = 0;
3444 /* Walk all the insns in this function looking for fcmp & fbranch
3445 instructions. Keep track of how many of each we find. */
3446 for (insn = get_insns (); insn; insn = next_insn (insn))
3448 rtx tmp;
3450 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3451 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3452 continue;
3454 tmp = PATTERN (insn);
3456 /* It must be a set. */
3457 if (GET_CODE (tmp) != SET)
3458 continue;
3460 /* If the destination is CCFP, then we've found an fcmp insn. */
3461 tmp = SET_DEST (tmp);
3462 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3464 fcmp_count++;
3465 continue;
3468 tmp = PATTERN (insn);
3469 /* If this is an fbranch instruction, bump the fbranch counter. */
3470 if (GET_CODE (tmp) == SET
3471 && SET_DEST (tmp) == pc_rtx
3472 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3473 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3474 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3475 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3477 fbranch_count++;
3478 continue;
3483 /* Find all floating point compare + branch insns. If possible,
3484 reverse the comparison & the branch to avoid add,tr insns. */
3485 for (insn = get_insns (); insn; insn = next_insn (insn))
3487 rtx tmp;
3488 rtx_insn *next;
3490 /* Ignore anything that isn't an INSN. */
3491 if (! NONJUMP_INSN_P (insn))
3492 continue;
3494 tmp = PATTERN (insn);
3496 /* It must be a set. */
3497 if (GET_CODE (tmp) != SET)
3498 continue;
3500 /* The destination must be CCFP, which is register zero. */
3501 tmp = SET_DEST (tmp);
3502 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3503 continue;
3505 /* INSN should be a set of CCFP.
3507 See if the result of this insn is used in a reversed FP
3508 conditional branch. If so, reverse our condition and
3509 the branch. Doing so avoids useless add,tr insns. */
3510 next = next_insn (insn);
3511 while (next)
3513 /* Jumps, calls and labels stop our search. */
3514 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3515 break;
3517 /* As does another fcmp insn. */
3518 if (NONJUMP_INSN_P (next)
3519 && GET_CODE (PATTERN (next)) == SET
3520 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3521 && REGNO (SET_DEST (PATTERN (next))) == 0)
3522 break;
3524 next = next_insn (next);
3527 /* Is NEXT_INSN a branch? */
3528 if (next && JUMP_P (next))
3530 rtx pattern = PATTERN (next);
3532 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3533 and CCFP dies, then reverse our conditional and the branch
3534 to avoid the add,tr. */
3535 if (GET_CODE (pattern) == SET
3536 && SET_DEST (pattern) == pc_rtx
3537 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3538 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3539 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3540 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3541 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3542 && (fcmp_count == fbranch_count
3543 || (check_notes
3544 && find_regno_note (next, REG_DEAD, 0))))
3546 /* Reverse the branch. */
3547 tmp = XEXP (SET_SRC (pattern), 1);
3548 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3549 XEXP (SET_SRC (pattern), 2) = tmp;
3550 INSN_CODE (next) = -1;
3552 /* Reverse our condition. */
3553 tmp = PATTERN (insn);
3554 PUT_CODE (XEXP (tmp, 1),
3555 (reverse_condition_maybe_unordered
3556 (GET_CODE (XEXP (tmp, 1)))));
3562 pass = !pass;
3566 /* You may have trouble believing this, but this is the 32 bit HP-PA
3567 stack layout. Wow.
3569 Offset Contents
3571 Variable arguments (optional; any number may be allocated)
3573 SP-(4*(N+9)) arg word N
3575 SP-56 arg word 5
3576 SP-52 arg word 4
3578 Fixed arguments (must be allocated; may remain unused)
3580 SP-48 arg word 3
3581 SP-44 arg word 2
3582 SP-40 arg word 1
3583 SP-36 arg word 0
3585 Frame Marker
3587 SP-32 External Data Pointer (DP)
3588 SP-28 External sr4
3589 SP-24 External/stub RP (RP')
3590 SP-20 Current RP
3591 SP-16 Static Link
3592 SP-12 Clean up
3593 SP-8 Calling Stub RP (RP'')
3594 SP-4 Previous SP
3596 Top of Frame
3598 SP-0 Stack Pointer (points to next available address)
3602 /* This function saves registers as follows. Registers marked with ' are
3603 this function's registers (as opposed to the previous function's).
3604 If a frame_pointer isn't needed, r4 is saved as a general register;
3605 the space for the frame pointer is still allocated, though, to keep
3606 things simple.
3609 Top of Frame
3611 SP (FP') Previous FP
3612 SP + 4 Alignment filler (sigh)
3613 SP + 8 Space for locals reserved here.
3617 SP + n All call saved registers used.
3621 SP + o All call saved fp registers used.
3625 SP + p (SP') points to next available address.
3629 /* Global variables set by output_function_prologue(). */
3630 /* Size of frame. Need to know this to emit return insns from
3631 leaf procedures. */
3632 static HOST_WIDE_INT actual_fsize, local_fsize;
3633 static int save_fregs;
3635 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3636 Handle case where DISP > 8k by using the add_high_const patterns.
3638 Note in DISP > 8k case, we will leave the high part of the address
3639 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
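/* Sketch of the emitted code when DISP does not fit in 14 bits but
   fits in 32 (assuming BASE is %r30 and REG is %r3):

       addil L'disp,%r30        ; %r1 = %r30 + high part of disp
       stw %r3,R'disp(%r1)      ; LO_SUM completes the address  */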
3641 static void
3642 store_reg (int reg, HOST_WIDE_INT disp, int base)
3644 rtx dest, src, basereg;
3645 rtx_insn *insn;
3647 src = gen_rtx_REG (word_mode, reg);
3648 basereg = gen_rtx_REG (Pmode, base);
3649 if (VAL_14_BITS_P (disp))
3651 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3652 insn = emit_move_insn (dest, src);
3654 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3656 rtx delta = GEN_INT (disp);
3657 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3659 emit_move_insn (tmpreg, delta);
3660 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3661 if (DO_FRAME_NOTES)
3663 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3664 gen_rtx_SET (tmpreg,
3665 gen_rtx_PLUS (Pmode, basereg, delta)));
3666 RTX_FRAME_RELATED_P (insn) = 1;
3668 dest = gen_rtx_MEM (word_mode, tmpreg);
3669 insn = emit_move_insn (dest, src);
3671 else
3673 rtx delta = GEN_INT (disp);
3674 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3675 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3677 emit_move_insn (tmpreg, high);
3678 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3679 insn = emit_move_insn (dest, src);
3680 if (DO_FRAME_NOTES)
3681 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3682 gen_rtx_SET (gen_rtx_MEM (word_mode,
3683 gen_rtx_PLUS (word_mode,
3684 basereg,
3685 delta)),
3686 src));
3689 if (DO_FRAME_NOTES)
3690 RTX_FRAME_RELATED_P (insn) = 1;
3693 /* Emit RTL to store REG at the memory location specified by BASE and then
3694 add MOD to BASE. MOD must be <= 8k. */
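/* The post-increment store built here matches the PA "stw,ma"
   (stwm on PA 1.x) form, e.g. "stwm %r3,128(%r30)" to save a
   register and advance the stack pointer in one insn.  */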
3696 static void
3697 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3699 rtx basereg, srcreg, delta;
3700 rtx_insn *insn;
3702 gcc_assert (VAL_14_BITS_P (mod));
3704 basereg = gen_rtx_REG (Pmode, base);
3705 srcreg = gen_rtx_REG (word_mode, reg);
3706 delta = GEN_INT (mod);
3708 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3709 if (DO_FRAME_NOTES)
3711 RTX_FRAME_RELATED_P (insn) = 1;
3713 /* RTX_FRAME_RELATED_P must be set on each frame related set
3714 in a parallel with more than one element. */
3715 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3716 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3720 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3721 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3722 whether to add a frame note or not.
3724 In the DISP > 8k case, we leave the high part of the address in %r1.
3725 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3727 static void
3728 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3730 rtx_insn *insn;
3732 if (VAL_14_BITS_P (disp))
3734 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3735 plus_constant (Pmode,
3736 gen_rtx_REG (Pmode, base), disp));
3738 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3740 rtx basereg = gen_rtx_REG (Pmode, base);
3741 rtx delta = GEN_INT (disp);
3742 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3744 emit_move_insn (tmpreg, delta);
3745 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3746 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3747 if (DO_FRAME_NOTES)
3748 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3749 gen_rtx_SET (tmpreg,
3750 gen_rtx_PLUS (Pmode, basereg, delta)));
3752 else
3754 rtx basereg = gen_rtx_REG (Pmode, base);
3755 rtx delta = GEN_INT (disp);
3756 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3758 emit_move_insn (tmpreg,
3759 gen_rtx_PLUS (Pmode, basereg,
3760 gen_rtx_HIGH (Pmode, delta)));
3761 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3762 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3765 if (DO_FRAME_NOTES && note)
3766 RTX_FRAME_RELATED_P (insn) = 1;
3769 HOST_WIDE_INT
3770 pa_compute_frame_size (poly_int64 size, int *fregs_live)
3772 int freg_saved = 0;
3773 int i, j;
3775 /* The code in pa_expand_prologue and pa_expand_epilogue must
3776 be consistent with the rounding and size calculation done here.
3777 Change them at the same time. */
3779 /* We do our own stack alignment. First, round the size of the
3780 stack locals up to a word boundary. */
3781 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3783 /* Space for previous frame pointer + filler. If any frame is
3784 allocated, we need to add in the TARGET_STARTING_FRAME_OFFSET. We
3785 waste some space here for the sake of HP compatibility. The
3786 first slot is only used when the frame pointer is needed. */
3787 if (size || frame_pointer_needed)
3788 size += pa_starting_frame_offset ();
3790 /* If the current function calls __builtin_eh_return, then we need
3791 to allocate stack space for registers that will hold data for
3792 the exception handler. */
3793 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3795 unsigned int i;
3797 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3798 continue;
3799 size += i * UNITS_PER_WORD;
3802 /* Account for space used by the callee general register saves. */
3803 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3804 if (df_regs_ever_live_p (i))
3805 size += UNITS_PER_WORD;
3807 /* Account for space used by the callee floating point register saves. */
3808 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3809 if (df_regs_ever_live_p (i)
3810 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3812 freg_saved = 1;
3814 /* We always save both halves of the FP register, so always
3815 increment the frame size by 8 bytes. */
3816 size += 8;
3819 /* If any of the floating registers are saved, account for the
3820 alignment needed for the floating point register save block. */
3821 if (freg_saved)
3823 size = (size + 7) & ~7;
3824 if (fregs_live)
3825 *fregs_live = 1;
3828 /* The various ABIs include space for the outgoing parameters in the
3829 size of the current function's stack frame. We don't need to align
3830 for the outgoing arguments as their alignment is set by the final
3831 rounding for the frame as a whole. */
3832 size += crtl->outgoing_args_size;
3834 /* Allocate space for the fixed frame marker. This space must be
3835 allocated for any function that makes calls or allocates
3836 stack space. */
3837 if (!crtl->is_leaf || size)
3838 size += TARGET_64BIT ? 48 : 32;
3840 /* Finally, round to the preferred stack boundary. */
3841 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3842 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
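/* Illustrative sketch, not part of the build: both roundings above are
   the usual power-of-two round-up idiom.  With a 64-byte preferred
   stack boundary, for example, (130 + 63) & ~63 == 192.  */
#if 0
static long
round_up_pow2 (long size, long align)	/* ALIGN must be a power of two */
{
  return (size + align - 1) & ~(align - 1);
}
#endif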
3845 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3846 of memory. If any fpu reg is used in the function, we allocate
3847 such a block here, at the bottom of the frame, just in case it's needed.
3849 If this function is a leaf procedure, then we may choose not
3850 to do a "save" insn. The decision about whether or not
3851 to do this is made in regclass.c. */
3853 static void
3854 pa_output_function_prologue (FILE *file)
3856 /* The function's label and associated .PROC must never be
3857 separated and must be output *after* any profiling declarations
3858 to avoid changing spaces/subspaces within a procedure. */
3859 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3860 fputs ("\t.PROC\n", file);
3862 /* pa_expand_prologue does the dirty work now. We just need
3863 to output the assembler directives which denote the start
3864 of a function. */
3865 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3866 if (crtl->is_leaf)
3867 fputs (",NO_CALLS", file);
3868 else
3869 fputs (",CALLS", file);
3870 if (rp_saved)
3871 fputs (",SAVE_RP", file);
3873 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3874 at the beginning of the frame and that it is used as the frame
3875 pointer for the frame. We do this because our current frame
3876 layout doesn't conform to that specified in the HP runtime
3877 documentation and we need a way to indicate to programs such as
3878 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3879 isn't used by HP compilers but is supported by the assembler.
3880 However, SAVE_SP is supposed to indicate that the previous stack
3881 pointer has been saved in the frame marker. */
3882 if (frame_pointer_needed)
3883 fputs (",SAVE_SP", file);
3885 /* Pass on information about the number of callee register saves
3886 performed in the prologue.
3888 The compiler is supposed to pass the highest register number
3889 saved, the assembler then has to adjust that number before
3890 entering it into the unwind descriptor (to account for any
3891 caller saved registers with lower register numbers than the
3892 first callee saved register). */
3893 if (gr_saved)
3894 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3896 if (fr_saved)
3897 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3899 fputs ("\n\t.ENTRY\n", file);
3901 remove_useless_addtr_insns (0);
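/* For reference, a typical directive block produced by the code above
   looks like this (all values hypothetical):

	.PROC
	.CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=5
	.ENTRY

   FRAME comes from actual_fsize; ENTRY_GR and ENTRY_FR carry the
   biased save counts described in the comments above.  */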
3904 void
3905 pa_expand_prologue (void)
3907 int merge_sp_adjust_with_store = 0;
3908 HOST_WIDE_INT size = get_frame_size ();
3909 HOST_WIDE_INT offset;
3910 int i;
3911 rtx tmpreg;
3912 rtx_insn *insn;
3914 gr_saved = 0;
3915 fr_saved = 0;
3916 save_fregs = 0;
3918 /* Compute total size for frame pointer, filler, locals and rounding to
3919 the next word boundary. Similar code appears in pa_compute_frame_size
3920 and must be changed in tandem with this code. */
3921 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3922 if (local_fsize || frame_pointer_needed)
3923 local_fsize += pa_starting_frame_offset ();
3925 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3926 if (flag_stack_usage_info)
3927 current_function_static_stack_size = actual_fsize;
3929 /* Compute a few things we will use often. */
3930 tmpreg = gen_rtx_REG (word_mode, 1);
3932 /* Save RP first. The calling conventions manual states RP will
3933 always be stored into the caller's frame at sp - 20 or sp - 16
3934 depending on which ABI is in use. */
3935 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3937 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3938 rp_saved = true;
3940 else
3941 rp_saved = false;
3943 /* Allocate the local frame and set up the frame pointer if needed. */
3944 if (actual_fsize != 0)
3946 if (frame_pointer_needed)
3948 /* Copy the old frame pointer temporarily into %r1. Set up the
3949 new stack pointer, then store away the saved old frame pointer
3950 into the stack at sp and at the same time update the stack
3951 pointer by actual_fsize bytes. Two versions: the first
3952 handles small (<8k) frames. The second handles large (>=8k)
3953 frames. */
3954 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3955 if (DO_FRAME_NOTES)
3956 RTX_FRAME_RELATED_P (insn) = 1;
3958 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3959 if (DO_FRAME_NOTES)
3960 RTX_FRAME_RELATED_P (insn) = 1;
3962 if (VAL_14_BITS_P (actual_fsize))
3963 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3964 else
3966 /* It is incorrect to store the saved frame pointer at *sp,
3967 then increment sp (writes beyond the current stack boundary).
3969 So instead use stwm to store at *sp and post-increment the
3970 stack pointer as an atomic operation. Then increment sp to
3971 finish allocating the new frame. */
3972 HOST_WIDE_INT adjust1 = 8192 - 64;
3973 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3975 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3976 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3977 adjust2, 1);
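/* Worked example of the large-frame split above, with a hypothetical
   frame size; adjust1 stays within the post store's reach and adjust2
   covers the remainder.  */
#if 0
long ex_fsize = 20000;			/* hypothetical actual_fsize */
long ex_adjust1 = 8192 - 64;		/* 8128, stored-and-bumped */
long ex_adjust2 = ex_fsize - ex_adjust1;	/* 11872, added afterwards */
#endif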
3980 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3981 we need to store the previous stack pointer (frame pointer)
3982 into the frame marker on targets that use the HP unwind
3983 library. This allows the HP unwind library to be used to
3984 unwind GCC frames. However, we are not fully compatible
3985 with the HP library because our frame layout differs from
3986 that specified in the HP runtime specification.
3988 We don't want a frame note on this instruction as the frame
3989 marker moves during dynamic stack allocation.
3991 This instruction also serves as a blockage to prevent
3992 register spills from being scheduled before the stack
3993 pointer is raised. This is necessary as we store
3994 registers using the frame pointer as a base register,
3995 and the frame pointer is set before sp is raised. */
3996 if (TARGET_HPUX_UNWIND_LIBRARY)
3998 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3999 GEN_INT (TARGET_64BIT ? -8 : -4));
4001 emit_move_insn (gen_rtx_MEM (word_mode, addr),
4002 hard_frame_pointer_rtx);
4004 else
4005 emit_insn (gen_blockage ());
4007 /* No frame pointer needed. */
4008 else
4010 /* In some cases we can perform the first callee register save
4011 and allocate the stack frame at the same time. If so, just
4012 make a note of it and defer allocating the frame until saving
4013 the callee registers. */
4014 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
4015 merge_sp_adjust_with_store = 1;
4016 /* Cannot optimize. Adjust the stack frame by actual_fsize
4017 bytes. */
4018 else
4019 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4020 actual_fsize, 1);
4024 /* Normal register save.
4026 Do not save the frame pointer in the frame_pointer_needed case. It
4027 was done earlier. */
4028 if (frame_pointer_needed)
4030 offset = local_fsize;
4032 /* Saving the EH return data registers in the frame is the simplest
4033 way to get the frame unwind information emitted. We put them
4034 just before the general registers. */
4035 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4037 unsigned int i, regno;
4039 for (i = 0; ; ++i)
4041 regno = EH_RETURN_DATA_REGNO (i);
4042 if (regno == INVALID_REGNUM)
4043 break;
4045 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4046 offset += UNITS_PER_WORD;
4050 for (i = 18; i >= 4; i--)
4051 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4053 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4054 offset += UNITS_PER_WORD;
4055 gr_saved++;
4057 /* Account for %r3 which is saved in a special place. */
4058 gr_saved++;
4060 /* No frame pointer needed. */
4061 else
4063 offset = local_fsize - actual_fsize;
4065 /* Saving the EH return data registers in the frame is the simplest
4066 way to get the frame unwind information emitted. */
4067 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4069 unsigned int i, regno;
4071 for (i = 0; ; ++i)
4073 regno = EH_RETURN_DATA_REGNO (i);
4074 if (regno == INVALID_REGNUM)
4075 break;
4077 /* If merge_sp_adjust_with_store is nonzero, then we can
4078 optimize the first save. */
4079 if (merge_sp_adjust_with_store)
4081 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4082 merge_sp_adjust_with_store = 0;
4084 else
4085 store_reg (regno, offset, STACK_POINTER_REGNUM);
4086 offset += UNITS_PER_WORD;
4090 for (i = 18; i >= 3; i--)
4091 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4093 /* If merge_sp_adjust_with_store is nonzero, then we can
4094 optimize the first GR save. */
4095 if (merge_sp_adjust_with_store)
4097 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4098 merge_sp_adjust_with_store = 0;
4100 else
4101 store_reg (i, offset, STACK_POINTER_REGNUM);
4102 offset += UNITS_PER_WORD;
4103 gr_saved++;
4106 /* If we wanted to merge the SP adjustment with a GR save, but we never
4107 did any GR saves, then just emit the adjustment here. */
4108 if (merge_sp_adjust_with_store)
4109 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4110 actual_fsize, 1);
4113 /* The hppa calling conventions say that %r19, the pic offset
4114 register, is saved at sp - 32 (in this function's frame)
4115 when generating PIC code. FIXME: What is the correct thing
4116 to do for functions which make no calls and allocate no
4117 frame? Do we need to allocate a frame, or can we just omit
4118 the save? For now we'll just omit the save.
4120 We don't want a note on this insn as the frame marker can
4121 move if there is a dynamic stack allocation. */
4122 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4124 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4126 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4130 /* Align pointer properly (doubleword boundary). */
4131 offset = (offset + 7) & ~7;
4133 /* Floating point register store. */
4134 if (save_fregs)
4136 rtx base;
4138 /* First get the frame or stack pointer to the start of the FP register
4139 save area. */
4140 if (frame_pointer_needed)
4142 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4143 base = hard_frame_pointer_rtx;
4145 else
4147 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4148 base = stack_pointer_rtx;
4151 /* Now actually save the FP registers. */
4152 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4154 if (df_regs_ever_live_p (i)
4155 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4157 rtx addr, reg;
4158 rtx_insn *insn;
4159 addr = gen_rtx_MEM (DFmode,
4160 gen_rtx_POST_INC (word_mode, tmpreg));
4161 reg = gen_rtx_REG (DFmode, i);
4162 insn = emit_move_insn (addr, reg);
4163 if (DO_FRAME_NOTES)
4165 RTX_FRAME_RELATED_P (insn) = 1;
4166 if (TARGET_64BIT)
4168 rtx mem = gen_rtx_MEM (DFmode,
4169 plus_constant (Pmode, base,
4170 offset));
4171 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4172 gen_rtx_SET (mem, reg));
4174 else
4176 rtx meml = gen_rtx_MEM (SFmode,
4177 plus_constant (Pmode, base,
4178 offset));
4179 rtx memr = gen_rtx_MEM (SFmode,
4180 plus_constant (Pmode, base,
4181 offset + 4));
4182 rtx regl = gen_rtx_REG (SFmode, i);
4183 rtx regr = gen_rtx_REG (SFmode, i + 1);
4184 rtx setl = gen_rtx_SET (meml, regl);
4185 rtx setr = gen_rtx_SET (memr, regr);
4186 rtvec vec;
4188 RTX_FRAME_RELATED_P (setl) = 1;
4189 RTX_FRAME_RELATED_P (setr) = 1;
4190 vec = gen_rtvec (2, setl, setr);
4191 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4192 gen_rtx_SEQUENCE (VOIDmode, vec));
4195 offset += GET_MODE_SIZE (DFmode);
4196 fr_saved++;
4202 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4203 Handle case where DISP > 8k by using the add_high_const patterns. */
4205 static void
4206 load_reg (int reg, HOST_WIDE_INT disp, int base)
4208 rtx dest = gen_rtx_REG (word_mode, reg);
4209 rtx basereg = gen_rtx_REG (Pmode, base);
4210 rtx src;
4212 if (VAL_14_BITS_P (disp))
4213 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4214 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4216 rtx delta = GEN_INT (disp);
4217 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4219 emit_move_insn (tmpreg, delta);
4220 if (TARGET_DISABLE_INDEXING)
4222 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4223 src = gen_rtx_MEM (word_mode, tmpreg);
4225 else
4226 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4228 else
4230 rtx delta = GEN_INT (disp);
4231 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4232 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4234 emit_move_insn (tmpreg, high);
4235 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4238 emit_move_insn (dest, src);
4241 /* Update the total code bytes output to the text section. */
4243 static void
4244 update_total_code_bytes (unsigned int nbytes)
4246 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4247 && !IN_NAMED_SECTION_P (cfun->decl))
4249 unsigned int old_total = total_code_bytes;
4251 total_code_bytes += nbytes;
4253 /* Be prepared to handle overflows. */
4254 if (old_total > total_code_bytes)
4255 total_code_bytes = UINT_MAX;
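/* Illustrative sketch, not part of the build: the wraparound test above
   relies on unsigned overflow being well defined -- if the sum wrapped,
   it is necessarily smaller than the old total.  */
#if 0
unsigned int old_total = 0xfffffff0u;
unsigned int total = old_total + 0x20u;	/* wraps around to 0x10 */
if (old_total > total)
  total = UINT_MAX;			/* saturate instead of wrapping */
#endif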
4259 /* This function generates the assembly code for function exit.
4260 Args are as for output_function_prologue ().
4262 The function epilogue should not depend on the current stack
4263 pointer! It should use the frame pointer only. This is mandatory
4264 because of alloca; we also take advantage of it to omit stack
4265 adjustments before returning. */
4267 static void
4268 pa_output_function_epilogue (FILE *file)
4270 rtx_insn *insn = get_last_insn ();
4271 bool extra_nop;
4273 /* pa_expand_epilogue does the dirty work now. We just need
4274 to output the assembler directives which denote the end
4275 of a function.
4277 To make debuggers happy, emit a nop if the epilogue was completely
4278 eliminated due to a volatile call as the last insn in the
4279 current function. That way the return address (in %r2) will
4280 always point to a valid instruction in the current function. */
4282 /* Get the last real insn. */
4283 if (NOTE_P (insn))
4284 insn = prev_real_insn (insn);
4286 /* If it is a sequence, then look inside. */
4287 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4288 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4290 /* If insn is a CALL_INSN, then it must be a call to a volatile
4291 function (otherwise there would be epilogue insns). */
4292 if (insn && CALL_P (insn))
4294 fputs ("\tnop\n", file);
4295 extra_nop = true;
4297 else
4298 extra_nop = false;
4300 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4302 if (TARGET_SOM && TARGET_GAS)
4304 /* We are done with this subspace except possibly for some additional
4305 debug information. Forget that we are in this subspace to ensure
4306 that the next function is output in its own subspace. */
4307 in_section = NULL;
4308 cfun->machine->in_nsubspa = 2;
4311 /* Thunks do their own insn accounting. */
4312 if (cfun->is_thunk)
4313 return;
4315 if (INSN_ADDRESSES_SET_P ())
4317 last_address = extra_nop ? 4 : 0;
4318 insn = get_last_nonnote_insn ();
4319 if (insn)
4321 last_address += INSN_ADDRESSES (INSN_UID (insn));
4322 if (INSN_P (insn))
4323 last_address += insn_default_length (insn);
4325 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4326 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4328 else
4329 last_address = UINT_MAX;
4331 /* Finally, update the total number of code bytes output so far. */
4332 update_total_code_bytes (last_address);
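/* For reference, the extra nop emitted above keeps the return address
   inside the function when the last insn is a call to a noreturn
   ("volatile") routine.  Hypothetical assembler output:

	bl	abort,%r2	; last insn, epilogue eliminated
	nop			; delay slot
	nop			; emitted above so %r2 still points
	.EXIT			; into this function
	.PROCEND  */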
4335 void
4336 pa_expand_epilogue (void)
4338 rtx tmpreg;
4339 HOST_WIDE_INT offset;
4340 HOST_WIDE_INT ret_off = 0;
4341 int i;
4342 int merge_sp_adjust_with_load = 0;
4344 /* We will use this often. */
4345 tmpreg = gen_rtx_REG (word_mode, 1);
4347 /* Try to restore RP early to avoid load/use interlocks when
4348 RP gets used in the return (bv) instruction. This appears to still
4349 be necessary even when we schedule the prologue and epilogue. */
4350 if (rp_saved)
4352 ret_off = TARGET_64BIT ? -16 : -20;
4353 if (frame_pointer_needed)
4355 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4356 ret_off = 0;
4358 else
4360 /* No frame pointer, and stack is smaller than 8k. */
4361 if (VAL_14_BITS_P (ret_off - actual_fsize))
4363 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4364 ret_off = 0;
4369 /* General register restores. */
4370 if (frame_pointer_needed)
4372 offset = local_fsize;
4374 /* If the current function calls __builtin_eh_return, then we need
4375 to restore the saved EH data registers. */
4376 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4378 unsigned int i, regno;
4380 for (i = 0; ; ++i)
4382 regno = EH_RETURN_DATA_REGNO (i);
4383 if (regno == INVALID_REGNUM)
4384 break;
4386 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4387 offset += UNITS_PER_WORD;
4391 for (i = 18; i >= 4; i--)
4392 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4394 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4395 offset += UNITS_PER_WORD;
4398 else
4400 offset = local_fsize - actual_fsize;
4402 /* If the current function calls __builtin_eh_return, then we need
4403 to restore the saved EH data registers. */
4404 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4406 unsigned int i, regno;
4408 for (i = 0; ; ++i)
4410 regno = EH_RETURN_DATA_REGNO (i);
4411 if (regno == INVALID_REGNUM)
4412 break;
4414 /* Only for the first load.
4415 merge_sp_adjust_with_load holds the register load
4416 with which we will merge the sp adjustment. */
4417 if (merge_sp_adjust_with_load == 0
4418 && local_fsize == 0
4419 && VAL_14_BITS_P (-actual_fsize))
4420 merge_sp_adjust_with_load = regno;
4421 else
4422 load_reg (regno, offset, STACK_POINTER_REGNUM);
4423 offset += UNITS_PER_WORD;
4427 for (i = 18; i >= 3; i--)
4429 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4431 /* Only for the first load.
4432 merge_sp_adjust_with_load holds the register load
4433 with which we will merge the sp adjustment. */
4434 if (merge_sp_adjust_with_load == 0
4435 && local_fsize == 0
4436 && VAL_14_BITS_P (-actual_fsize))
4437 merge_sp_adjust_with_load = i;
4438 else
4439 load_reg (i, offset, STACK_POINTER_REGNUM);
4440 offset += UNITS_PER_WORD;
4445 /* Align pointer properly (doubleword boundary). */
4446 offset = (offset + 7) & ~7;
4448 /* FP register restores. */
4449 if (save_fregs)
4451 /* Adjust the register to index off of. */
4452 if (frame_pointer_needed)
4453 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4454 else
4455 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4457 /* Actually do the restores now. */
4458 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4459 if (df_regs_ever_live_p (i)
4460 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4462 rtx src = gen_rtx_MEM (DFmode,
4463 gen_rtx_POST_INC (word_mode, tmpreg));
4464 rtx dest = gen_rtx_REG (DFmode, i);
4465 emit_move_insn (dest, src);
4469 /* Emit a blockage insn here to keep these insns from being moved to
4470 an earlier spot in the epilogue, or into the main instruction stream.
4472 This is necessary as we must not cut the stack back before all the
4473 restores are finished. */
4474 emit_insn (gen_blockage ());
4476 /* Reset stack pointer (and possibly frame pointer). The stack
4477 pointer is initially set to fp + 64 to avoid a race condition. */
4478 if (frame_pointer_needed)
4480 rtx delta = GEN_INT (-64);
4482 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4483 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4484 stack_pointer_rtx, delta));
4486 /* If we were deferring a callee register restore, do it now. */
4487 else if (merge_sp_adjust_with_load)
4489 rtx delta = GEN_INT (-actual_fsize);
4490 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4492 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4494 else if (actual_fsize != 0)
4495 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4496 - actual_fsize, 0);
4498 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4499 frame greater than 8k), do so now. */
4500 if (ret_off != 0)
4501 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4503 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4505 rtx sa = EH_RETURN_STACKADJ_RTX;
4507 emit_insn (gen_blockage ());
4508 emit_insn (TARGET_64BIT
4509 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4510 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4514 bool
4515 pa_can_use_return_insn (void)
4517 if (!reload_completed)
4518 return false;
4520 if (frame_pointer_needed)
4521 return false;
4523 if (df_regs_ever_live_p (2))
4524 return false;
4526 if (crtl->profile)
4527 return false;
4529 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4532 rtx
4533 hppa_pic_save_rtx (void)
4535 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4538 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4539 #define NO_DEFERRED_PROFILE_COUNTERS 0
4540 #endif
4543 /* Vector of funcdef numbers. */
4544 static vec<int> funcdef_nos;
4546 /* Output deferred profile counters. */
4547 static void
4548 output_deferred_profile_counters (void)
4550 unsigned int i;
4551 int align, n;
4553 if (funcdef_nos.is_empty ())
4554 return;
4556 switch_to_section (data_section);
4557 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4558 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4560 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4562 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4563 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4566 funcdef_nos.release ();
4569 void
4570 hppa_profile_hook (int label_no)
4572 rtx_code_label *label_rtx = gen_label_rtx ();
4573 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4574 rtx arg_bytes, begin_label_rtx, mcount, sym;
4575 rtx_insn *call_insn;
4576 char begin_label_name[16];
4577 bool use_mcount_pcrel_call;
4579 /* Set up call destination. */
4580 sym = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
4581 pa_encode_label (sym);
4582 mcount = gen_rtx_MEM (Pmode, sym);
4584 /* If we can reach _mcount with a pc-relative call, we can optimize
4585 loading the address of the current function. This requires linker
4586 long branch stub support. */
4587 if (!TARGET_PORTABLE_RUNTIME
4588 && !TARGET_LONG_CALLS
4589 && (TARGET_SOM || flag_function_sections))
4590 use_mcount_pcrel_call = TRUE;
4591 else
4592 use_mcount_pcrel_call = FALSE;
4594 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4595 label_no);
4596 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4598 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4600 if (!use_mcount_pcrel_call)
4602 /* The address of the function is loaded into %r25 with an instruction-
4603 relative sequence that avoids the use of relocations. We use SImode
4604 for the address of the function in both 32 and 64-bit code to avoid
4605 having to provide DImode versions of the lcla2 pattern. */
4606 if (TARGET_PA_20)
4607 emit_insn (gen_lcla2 (gen_rtx_REG (SImode, 25), label_rtx));
4608 else
4609 emit_insn (gen_lcla1 (gen_rtx_REG (SImode, 25), label_rtx));
4612 if (!NO_DEFERRED_PROFILE_COUNTERS)
4614 rtx count_label_rtx, addr, r24;
4615 char count_label_name[16];
4617 funcdef_nos.safe_push (label_no);
4618 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4619 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4620 ggc_strdup (count_label_name));
4622 addr = force_reg (Pmode, count_label_rtx);
4623 r24 = gen_rtx_REG (Pmode, 24);
4624 emit_move_insn (r24, addr);
4626 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4627 if (use_mcount_pcrel_call)
4628 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4629 begin_label_rtx));
4630 else
4631 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4633 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4635 else
4637 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4638 if (use_mcount_pcrel_call)
4639 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4640 begin_label_rtx));
4641 else
4642 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4645 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4646 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4648 /* Indicate the _mcount call cannot throw, nor will it execute a
4649 non-local goto. */
4650 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4652 /* Allocate space for fixed arguments. */
4653 if (reg_parm_stack_space > crtl->outgoing_args_size)
4654 crtl->outgoing_args_size = reg_parm_stack_space;
4657 /* Fetch the return address for the frame COUNT steps up from
4658 the current frame, after the prologue. FRAMEADDR is the
4659 frame pointer of the COUNT frame.
4661 We want to ignore any export stub remnants here. To handle this,
4662 we examine the code at the return address, and if it is an export
4663 stub, we return a memory rtx for the stub return address stored
4664 at frame-24.
4666 The value returned is used in two different ways:
4668 1. To find a function's caller.
4670 2. To change the return address for a function.
4672 This function handles most instances of case 1; however, it will
4673 fail if there are two levels of stubs to execute on the return
4674 path. The only way I believe that can happen is if the return value
4675 needs a parameter relocation, which never happens for C code.
4677 This function handles most instances of case 2; however, it will
4678 fail if we did not originally have stub code on the return path
4679 but will need stub code on the new return path. This can happen if
4680 the caller & callee are both in the main program, but the new
4681 return location is in a shared library. */
4683 rtx
4684 pa_return_addr_rtx (int count, rtx frameaddr)
4686 rtx label;
4687 rtx rp;
4688 rtx saved_rp;
4689 rtx ins;
4691 /* The instruction stream at the return address of a PA1.X export stub is:
4693 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4694 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4695 0x00011820 | stub+16: mtsp r1,sr0
4696 0xe0400002 | stub+20: be,n 0(sr0,rp)
4698 0xe0400002 must be specified as -532676606 so that it won't be
4699 rejected as an invalid immediate operand on 64-bit hosts.
4701 The instruction stream at the return address of a PA2.0 export stub is:
4703 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4704 0xe840d002 | stub+12: bve,n (rp)
4707 HOST_WIDE_INT insns[4];
4708 int i, len;
4710 if (count != 0)
4711 return NULL_RTX;
4713 rp = get_hard_reg_initial_val (Pmode, 2);
4715 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4716 return rp;
4718 /* If there is no export stub then just use the value saved from
4719 the return pointer register. */
4721 saved_rp = gen_reg_rtx (Pmode);
4722 emit_move_insn (saved_rp, rp);
4724 /* Get pointer to the instruction stream. We have to mask out the
4725 privilege level from the two low order bits of the return address
4726 pointer here so that ins will point to the start of the first
4727 instruction that would have been executed if we returned. */
4728 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4729 label = gen_label_rtx ();
4731 if (TARGET_PA_20)
4733 insns[0] = 0x4bc23fd1;
4734 insns[1] = -398405630;
4735 len = 2;
4737 else
4739 insns[0] = 0x4bc23fd1;
4740 insns[1] = 0x004010a1;
4741 insns[2] = 0x00011820;
4742 insns[3] = -532676606;
4743 len = 4;
4746 /* Check the instruction stream at the normal return address for the
4747 export stub. If it is an export stub, then our return address is
4748 really in -24[frameaddr]. */
4750 for (i = 0; i < len; i++)
4752 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4753 rtx op1 = GEN_INT (insns[i]);
4754 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4757 /* Here we know that our return address points to an export
4758 stub. We don't want to return the address of the export stub,
4759 but rather the return address of the export stub. That return
4760 address is stored at -24[frameaddr]. */
4762 emit_move_insn (saved_rp,
4763 gen_rtx_MEM (Pmode,
4764 memory_address (Pmode,
4765 plus_constant (Pmode, frameaddr,
4766 -24))));
4768 emit_label (label);
4770 return saved_rp;
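/* Illustrative sketch, not part of the build: why the branch words are
   written as negative decimal literals above.  On a 64-bit host the
   hex literal 0xe0400002 would be a positive value rather than the
   sign-extended 32-bit word compared against, so the signed decimal
   form is used instead.  */
#if 0
#include <stdio.h>
int
main (void)
{
  printf ("%d\n", (int) 0xe0400002u);	/* prints -532676606 */
  printf ("%d\n", (int) 0xe840d002u);	/* prints -398405630 */
  return 0;
}
#endif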
4773 void
4774 pa_emit_bcond_fp (rtx operands[])
4776 enum rtx_code code = GET_CODE (operands[0]);
4777 rtx operand0 = operands[1];
4778 rtx operand1 = operands[2];
4779 rtx label = operands[3];
4781 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4782 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4784 emit_jump_insn (gen_rtx_SET (pc_rtx,
4785 gen_rtx_IF_THEN_ELSE (VOIDmode,
4786 gen_rtx_fmt_ee (NE,
4787 VOIDmode,
4788 gen_rtx_REG (CCFPmode, 0),
4789 const0_rtx),
4790 gen_rtx_LABEL_REF (VOIDmode, label),
4791 pc_rtx)));
4795 /* Adjust the cost of a scheduling dependency. Return the new cost of
4796 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4798 static int
4799 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4800 unsigned int)
4802 enum attr_type attr_type;
4804 /* Don't adjust costs for a pa8000 chip; also, don't adjust any
4805 true dependencies, as they are described with bypasses now. */
4806 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4807 return cost;
4809 if (! recog_memoized (insn))
4810 return 0;
4812 attr_type = get_attr_type (insn);
4814 switch (dep_type)
4816 case REG_DEP_ANTI:
4817 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4818 cycles later. */
4820 if (attr_type == TYPE_FPLOAD)
4822 rtx pat = PATTERN (insn);
4823 rtx dep_pat = PATTERN (dep_insn);
4824 if (GET_CODE (pat) == PARALLEL)
4826 /* This happens for the fldXs,mb patterns. */
4827 pat = XVECEXP (pat, 0, 0);
4829 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4830 /* If this happens, we have to extend this to schedule
4831 optimally. Return 0 for now. */
4832 return 0;
4834 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4836 if (! recog_memoized (dep_insn))
4837 return 0;
4838 switch (get_attr_type (dep_insn))
4840 case TYPE_FPALU:
4841 case TYPE_FPMULSGL:
4842 case TYPE_FPMULDBL:
4843 case TYPE_FPDIVSGL:
4844 case TYPE_FPDIVDBL:
4845 case TYPE_FPSQRTSGL:
4846 case TYPE_FPSQRTDBL:
4847 /* A fpload can't be issued until one cycle before a
4848 preceding arithmetic operation has finished if
4849 the target of the fpload is any of the sources
4850 (or destination) of the arithmetic operation. */
4851 return insn_default_latency (dep_insn) - 1;
4853 default:
4854 return 0;
4858 else if (attr_type == TYPE_FPALU)
4860 rtx pat = PATTERN (insn);
4861 rtx dep_pat = PATTERN (dep_insn);
4862 if (GET_CODE (pat) == PARALLEL)
4864 /* This happens for the fldXs,mb patterns. */
4865 pat = XVECEXP (pat, 0, 0);
4867 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4868 /* If this happens, we have to extend this to schedule
4869 optimally. Return 0 for now. */
4870 return 0;
4872 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4874 if (! recog_memoized (dep_insn))
4875 return 0;
4876 switch (get_attr_type (dep_insn))
4878 case TYPE_FPDIVSGL:
4879 case TYPE_FPDIVDBL:
4880 case TYPE_FPSQRTSGL:
4881 case TYPE_FPSQRTDBL:
4882 /* An ALU flop can't be issued until two cycles before a
4883 preceding divide or sqrt operation has finished if
4884 the target of the ALU flop is any of the sources
4885 (or destination) of the divide or sqrt operation. */
4886 return insn_default_latency (dep_insn) - 2;
4888 default:
4889 return 0;
4894 /* For other anti dependencies, the cost is 0. */
4895 return 0;
4897 case REG_DEP_OUTPUT:
4898 /* Output dependency; DEP_INSN writes a register that INSN writes some
4899 cycles later. */
4900 if (attr_type == TYPE_FPLOAD)
4902 rtx pat = PATTERN (insn);
4903 rtx dep_pat = PATTERN (dep_insn);
4904 if (GET_CODE (pat) == PARALLEL)
4906 /* This happens for the fldXs,mb patterns. */
4907 pat = XVECEXP (pat, 0, 0);
4909 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4910 /* If this happens, we have to extend this to schedule
4911 optimally. Return 0 for now. */
4912 return 0;
4914 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4916 if (! recog_memoized (dep_insn))
4917 return 0;
4918 switch (get_attr_type (dep_insn))
4920 case TYPE_FPALU:
4921 case TYPE_FPMULSGL:
4922 case TYPE_FPMULDBL:
4923 case TYPE_FPDIVSGL:
4924 case TYPE_FPDIVDBL:
4925 case TYPE_FPSQRTSGL:
4926 case TYPE_FPSQRTDBL:
4927 /* A fpload can't be issued until one cycle before a
4928 preceding arithmetic operation has finished if
4929 the target of the fpload is the destination of the
4930 arithmetic operation.
4932 Exception: For PA7100LC, PA7200 and PA7300, the cost
4933 is 3 cycles, unless they bundle together. We also
4934 pay the penalty if the second insn is a fpload. */
4935 return insn_default_latency (dep_insn) - 1;
4937 default:
4938 return 0;
4942 else if (attr_type == TYPE_FPALU)
4944 rtx pat = PATTERN (insn);
4945 rtx dep_pat = PATTERN (dep_insn);
4946 if (GET_CODE (pat) == PARALLEL)
4948 /* This happens for the fldXs,mb patterns. */
4949 pat = XVECEXP (pat, 0, 0);
4951 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4952 /* If this happens, we have to extend this to schedule
4953 optimally. Return 0 for now. */
4954 return 0;
4956 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4958 if (! recog_memoized (dep_insn))
4959 return 0;
4960 switch (get_attr_type (dep_insn))
4962 case TYPE_FPDIVSGL:
4963 case TYPE_FPDIVDBL:
4964 case TYPE_FPSQRTSGL:
4965 case TYPE_FPSQRTDBL:
4966 /* An ALU flop can't be issued until two cycles before a
4967 preceding divide or sqrt operation has finished if
4968 the target of the ALU flop is also the target of
4969 the divide or sqrt operation. */
4970 return insn_default_latency (dep_insn) - 2;
4972 default:
4973 return 0;
4978 /* For other output dependencies, the cost is 0. */
4979 return 0;
4981 default:
4982 gcc_unreachable ();
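/* Worked example for the cost adjustments above, using a hypothetical
   latency: if a preceding fpALU insn has a default latency of three
   cycles, an anti-dependent fpload may issue one cycle early, so the
   adjusted cost is 3 - 1 = 2; after a divide or sqrt, a dependent ALU
   flop gets two cycles of slack, hence latency - 2.  */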
4986 /* The 700 can only issue a single insn at a time.
4987 The 7XXX processors can issue two insns at a time.
4988 The 8000 can issue 4 insns at a time. */
4989 static int
4990 pa_issue_rate (void)
4992 switch (pa_cpu)
4994 case PROCESSOR_700: return 1;
4995 case PROCESSOR_7100: return 2;
4996 case PROCESSOR_7100LC: return 2;
4997 case PROCESSOR_7200: return 2;
4998 case PROCESSOR_7300: return 2;
4999 case PROCESSOR_8000: return 4;
5001 default:
5002 gcc_unreachable ();
5008 /* Return any length plus adjustment needed by INSN which already has
5009 its length computed as LENGTH. Return LENGTH if no adjustment is
5010 necessary.
5012 Also compute the length of an inline block move here as it is too
5013 complicated to express as a length attribute in pa.md. */
5014 int
5015 pa_adjust_insn_length (rtx_insn *insn, int length)
5017 rtx pat = PATTERN (insn);
5019 /* If length is negative or undefined, provide initial length. */
5020 if ((unsigned int) length >= INT_MAX)
5022 if (GET_CODE (pat) == SEQUENCE)
5023 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5025 switch (get_attr_type (insn))
5027 case TYPE_MILLI:
5028 length = pa_attr_length_millicode_call (insn);
5029 break;
5030 case TYPE_CALL:
5031 length = pa_attr_length_call (insn, 0);
5032 break;
5033 case TYPE_SIBCALL:
5034 length = pa_attr_length_call (insn, 1);
5035 break;
5036 case TYPE_DYNCALL:
5037 length = pa_attr_length_indirect_call (insn);
5038 break;
5039 case TYPE_SH_FUNC_ADRS:
5040 length = pa_attr_length_millicode_call (insn) + 20;
5041 break;
5042 default:
5043 gcc_unreachable ();
5047 /* Block move pattern. */
5048 if (NONJUMP_INSN_P (insn)
5049 && GET_CODE (pat) == PARALLEL
5050 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5051 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5052 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5053 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5054 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5055 length += compute_movmem_length (insn) - 4;
5056 /* Block clear pattern. */
5057 else if (NONJUMP_INSN_P (insn)
5058 && GET_CODE (pat) == PARALLEL
5059 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5060 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5061 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5062 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5063 length += compute_clrmem_length (insn) - 4;
5064 /* Conditional branch with an unfilled delay slot. */
5065 else if (JUMP_P (insn) && ! simplejump_p (insn))
5067 /* Adjust a short backwards conditional with an unfilled delay slot. */
5068 if (GET_CODE (pat) == SET
5069 && length == 4
5070 && JUMP_LABEL (insn) != NULL_RTX
5071 && ! forward_branch_p (insn))
5072 length += 4;
5073 else if (GET_CODE (pat) == PARALLEL
5074 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5075 && length == 4)
5076 length += 4;
5077 /* Adjust dbra insn with short backwards conditional branch with
5078 unfilled delay slot -- only for case where counter is in a
5079 general register. */
5080 else if (GET_CODE (pat) == PARALLEL
5081 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5082 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5083 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5084 && length == 4
5085 && ! forward_branch_p (insn))
5086 length += 4;
5088 return length;
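/* Example for the branch adjustments above (operands hypothetical): a
   4-byte short backward conditional branch with an unfilled delay slot
   grows by one word, the extra word accounting for the slot:

	comb,=	%r4,%r5,L$loop	; 4 bytes
	nop			; +4 bytes, the adjustment added above  */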
5091 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5093 static bool
5094 pa_print_operand_punct_valid_p (unsigned char code)
5096 if (code == '@'
5097 || code == '#'
5098 || code == '*'
5099 || code == '^')
5100 return true;
5102 return false;
5105 /* Print operand X (an rtx) in assembler syntax to file FILE.
5106 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5107 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5109 void
5110 pa_print_operand (FILE *file, rtx x, int code)
5112 switch (code)
5114 case '#':
5115 /* Output a 'nop' if there's nothing for the delay slot. */
5116 if (dbr_sequence_length () == 0)
5117 fputs ("\n\tnop", file);
5118 return;
5119 case '*':
5120 /* Output a nullification completer if there's nothing for the
5121 delay slot or nullification is requested. */
5122 if (dbr_sequence_length () == 0
5123 || (final_sequence
5124 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5125 fputs (",n", file);
5126 return;
5127 case 'R':
5128 /* Print out the second register name of a register pair.
5129 I.e., R (6) => 7. */
5130 fputs (reg_names[REGNO (x) + 1], file);
5131 return;
5132 case 'r':
5133 /* A register or zero. */
5134 if (x == const0_rtx
5135 || (x == CONST0_RTX (DFmode))
5136 || (x == CONST0_RTX (SFmode)))
5138 fputs ("%r0", file);
5139 return;
5141 else
5142 break;
5143 case 'f':
5144 /* A register or zero (floating point). */
5145 if (x == const0_rtx
5146 || (x == CONST0_RTX (DFmode))
5147 || (x == CONST0_RTX (SFmode)))
5149 fputs ("%fr0", file);
5150 return;
5152 else
5153 break;
5154 case 'A':
5156 rtx xoperands[2];
5158 xoperands[0] = XEXP (XEXP (x, 0), 0);
5159 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5160 pa_output_global_address (file, xoperands[1], 0);
5161 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5162 return;
5165 case 'C': /* Plain (C)ondition */
5166 case 'X':
5167 switch (GET_CODE (x))
5169 case EQ:
5170 fputs ("=", file); break;
5171 case NE:
5172 fputs ("<>", file); break;
5173 case GT:
5174 fputs (">", file); break;
5175 case GE:
5176 fputs (">=", file); break;
5177 case GEU:
5178 fputs (">>=", file); break;
5179 case GTU:
5180 fputs (">>", file); break;
5181 case LT:
5182 fputs ("<", file); break;
5183 case LE:
5184 fputs ("<=", file); break;
5185 case LEU:
5186 fputs ("<<=", file); break;
5187 case LTU:
5188 fputs ("<<", file); break;
5189 default:
5190 gcc_unreachable ();
5192 return;
5193 case 'N': /* Condition, (N)egated */
5194 switch (GET_CODE (x))
5196 case EQ:
5197 fputs ("<>", file); break;
5198 case NE:
5199 fputs ("=", file); break;
5200 case GT:
5201 fputs ("<=", file); break;
5202 case GE:
5203 fputs ("<", file); break;
5204 case GEU:
5205 fputs ("<<", file); break;
5206 case GTU:
5207 fputs ("<<=", file); break;
5208 case LT:
5209 fputs (">=", file); break;
5210 case LE:
5211 fputs (">", file); break;
5212 case LEU:
5213 fputs (">>", file); break;
5214 case LTU:
5215 fputs (">>=", file); break;
5216 default:
5217 gcc_unreachable ();
5219 return;
5220 /* For floating point comparisons. Note that the output
5221 predicates are the complement of the desired mode. The
5222 conditions for GT, GE, LT, LE and LTGT cause an invalid
5223 operation exception if the result is unordered and this
5224 exception is enabled in the floating-point status register. */
5225 case 'Y':
5226 switch (GET_CODE (x))
5228 case EQ:
5229 fputs ("!=", file); break;
5230 case NE:
5231 fputs ("=", file); break;
5232 case GT:
5233 fputs ("!>", file); break;
5234 case GE:
5235 fputs ("!>=", file); break;
5236 case LT:
5237 fputs ("!<", file); break;
5238 case LE:
5239 fputs ("!<=", file); break;
5240 case LTGT:
5241 fputs ("!<>", file); break;
5242 case UNLE:
5243 fputs ("!?<=", file); break;
5244 case UNLT:
5245 fputs ("!?<", file); break;
5246 case UNGE:
5247 fputs ("!?>=", file); break;
5248 case UNGT:
5249 fputs ("!?>", file); break;
5250 case UNEQ:
5251 fputs ("!?=", file); break;
5252 case UNORDERED:
5253 fputs ("!?", file); break;
5254 case ORDERED:
5255 fputs ("?", file); break;
5256 default:
5257 gcc_unreachable ();
5259 return;
5260 case 'S': /* Condition, operands are (S)wapped. */
5261 switch (GET_CODE (x))
5263 case EQ:
5264 fputs ("=", file); break;
5265 case NE:
5266 fputs ("<>", file); break;
5267 case GT:
5268 fputs ("<", file); break;
5269 case GE:
5270 fputs ("<=", file); break;
5271 case GEU:
5272 fputs ("<<=", file); break;
5273 case GTU:
5274 fputs ("<<", file); break;
5275 case LT:
5276 fputs (">", file); break;
5277 case LE:
5278 fputs (">=", file); break;
5279 case LEU:
5280 fputs (">>=", file); break;
5281 case LTU:
5282 fputs (">>", file); break;
5283 default:
5284 gcc_unreachable ();
5286 return;
5287 case 'B': /* Condition, (B)oth swapped and negate. */
5288 switch (GET_CODE (x))
5290 case EQ:
5291 fputs ("<>", file); break;
5292 case NE:
5293 fputs ("=", file); break;
5294 case GT:
5295 fputs (">=", file); break;
5296 case GE:
5297 fputs (">", file); break;
5298 case GEU:
5299 fputs (">>", file); break;
5300 case GTU:
5301 fputs (">>=", file); break;
5302 case LT:
5303 fputs ("<=", file); break;
5304 case LE:
5305 fputs ("<", file); break;
5306 case LEU:
5307 fputs ("<<", file); break;
5308 case LTU:
5309 fputs ("<<=", file); break;
5310 default:
5311 gcc_unreachable ();
5313 return;
5314 case 'k':
5315 gcc_assert (GET_CODE (x) == CONST_INT);
5316 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5317 return;
5318 case 'Q':
5319 gcc_assert (GET_CODE (x) == CONST_INT);
5320 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5321 return;
5322 case 'L':
5323 gcc_assert (GET_CODE (x) == CONST_INT);
5324 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5325 return;
5326 case 'o':
5327 gcc_assert (GET_CODE (x) == CONST_INT
5328 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5329 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5330 return;
5331 case 'O':
5332 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5333 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5334 return;
5335 case 'p':
5336 gcc_assert (GET_CODE (x) == CONST_INT);
5337 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5338 return;
5339 case 'P':
5340 gcc_assert (GET_CODE (x) == CONST_INT);
5341 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5342 return;
5343 case 'I':
5344 if (GET_CODE (x) == CONST_INT)
5345 fputs ("i", file);
5346 return;
5347 case 'M':
5348 case 'F':
5349 switch (GET_CODE (XEXP (x, 0)))
5351 case PRE_DEC:
5352 case PRE_INC:
5353 if (ASSEMBLER_DIALECT == 0)
5354 fputs ("s,mb", file);
5355 else
5356 fputs (",mb", file);
5357 break;
5358 case POST_DEC:
5359 case POST_INC:
5360 if (ASSEMBLER_DIALECT == 0)
5361 fputs ("s,ma", file);
5362 else
5363 fputs (",ma", file);
5364 break;
5365 case PLUS:
5366 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5367 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5369 if (ASSEMBLER_DIALECT == 0)
5370 fputs ("x", file);
5372 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5373 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5375 if (ASSEMBLER_DIALECT == 0)
5376 fputs ("x,s", file);
5377 else
5378 fputs (",s", file);
5380 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5381 fputs ("s", file);
5382 break;
5383 default:
5384 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5385 fputs ("s", file);
5386 break;
5388 return;
5389 case 'G':
5390 pa_output_global_address (file, x, 0);
5391 return;
5392 case 'H':
5393 pa_output_global_address (file, x, 1);
5394 return;
5395 case 0: /* Don't do anything special */
5396 break;
5397 case 'Z':
5399 unsigned op[3];
5400 compute_zdepwi_operands (INTVAL (x), op);
5401 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5402 return;
5404 case 'z':
5406 unsigned op[3];
5407 compute_zdepdi_operands (INTVAL (x), op);
5408 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5409 return;
5411 case 'c':
5412 /* We can get here from a .vtable_inherit due to our
5413 CONSTANT_ADDRESS_P rejecting perfectly good constant
5414 addresses. */
5415 break;
5416 default:
5417 gcc_unreachable ();
5419 if (GET_CODE (x) == REG)
5421 fputs (reg_names [REGNO (x)], file);
5422 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5424 fputs ("R", file);
5425 return;
5427 if (FP_REG_P (x)
5428 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5429 && (REGNO (x) & 1) == 0)
5430 fputs ("L", file);
5432 else if (GET_CODE (x) == MEM)
5434 int size = GET_MODE_SIZE (GET_MODE (x));
5435 rtx base = NULL_RTX;
5436 switch (GET_CODE (XEXP (x, 0)))
5438 case PRE_DEC:
5439 case POST_DEC:
5440 base = XEXP (XEXP (x, 0), 0);
5441 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5442 break;
5443 case PRE_INC:
5444 case POST_INC:
5445 base = XEXP (XEXP (x, 0), 0);
5446 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5447 break;
5448 case PLUS:
5449 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5450 fprintf (file, "%s(%s)",
5451 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5452 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5453 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5454 fprintf (file, "%s(%s)",
5455 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5456 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5457 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5458 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5460 /* Because the REG_POINTER flag can get lost during reload,
5461 pa_legitimate_address_p canonicalizes the order of the
5462 index and base registers in the combined move patterns. */
5463 rtx base = XEXP (XEXP (x, 0), 1);
5464 rtx index = XEXP (XEXP (x, 0), 0);
5466 fprintf (file, "%s(%s)",
5467 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5469 else
5470 output_address (GET_MODE (x), XEXP (x, 0));
5471 break;
5472 default:
5473 output_address (GET_MODE (x), XEXP (x, 0));
5474 break;
5477 else
5478 output_addr_const (file, x);
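/* Worked examples for the bit-field operand codes above, assuming
   INTVAL (x) == 5:
     %k -> ~5 = -6		%Q -> 64 - (5 & 63) = 59
     %L -> 32 - (5 & 31) = 27	%p -> 63 - (5 & 63) = 58
     %P -> 31 - (5 & 31) = 26  */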
5481 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5483 void
5484 pa_output_global_address (FILE *file, rtx x, int round_constant)
5487 /* Imagine (high (const (plus ...))). */
5488 if (GET_CODE (x) == HIGH)
5489 x = XEXP (x, 0);
5491 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5492 output_addr_const (file, x);
5493 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5495 output_addr_const (file, x);
5496 fputs ("-$global$", file);
5498 else if (GET_CODE (x) == CONST)
5500 const char *sep = "";
5501 int offset = 0; /* assembler wants -$global$ at end */
5502 rtx base = NULL_RTX;
5504 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5506 case LABEL_REF:
5507 case SYMBOL_REF:
5508 base = XEXP (XEXP (x, 0), 0);
5509 output_addr_const (file, base);
5510 break;
5511 case CONST_INT:
5512 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5513 break;
5514 default:
5515 gcc_unreachable ();
5518 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5520 case LABEL_REF:
5521 case SYMBOL_REF:
5522 base = XEXP (XEXP (x, 0), 1);
5523 output_addr_const (file, base);
5524 break;
5525 case CONST_INT:
5526 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5527 break;
5528 default:
5529 gcc_unreachable ();
5532 /* How bogus. The compiler is apparently responsible for
5533 rounding the constant if it uses an LR field selector.
5535 The linker and/or assembler seem a better place since
5536 they have to do this kind of thing already.
5538 If we fail to do this, HP's optimizing linker may eliminate
5539 an addil, but not update the ldw/stw/ldo instruction that
5540 uses the result of the addil. */
5541 if (round_constant)
5542 offset = ((offset + 0x1000) & ~0x1fff);
5544 switch (GET_CODE (XEXP (x, 0)))
5546 case PLUS:
5547 if (offset < 0)
5549 offset = -offset;
5550 sep = "-";
5552 else
5553 sep = "+";
5554 break;
5556 case MINUS:
5557 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5558 sep = "-";
5559 break;
5561 default:
5562 gcc_unreachable ();
5565 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5566 fputs ("-$global$", file);
5567 if (offset)
5568 fprintf (file, "%s%d", sep, offset);
5570 else
5571 output_addr_const (file, x);
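/* Worked example of the LR-field rounding above: for a hypothetical
   offset of 0x2345, (0x2345 + 0x1000) & ~0x1fff == 0x2000, so the high
   part materialized by the addil is rounded down to a 0x2000 boundary
   and the low-part instruction absorbs the remaining 0x345.  */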
5574 /* Output boilerplate text to appear at the beginning of the file.
5575 There are several possible versions. */
5576 #define aputs(x) fputs(x, asm_out_file)
5577 static inline void
5578 pa_file_start_level (void)
5580 if (TARGET_64BIT)
5581 aputs ("\t.LEVEL 2.0w\n");
5582 else if (TARGET_PA_20)
5583 aputs ("\t.LEVEL 2.0\n");
5584 else if (TARGET_PA_11)
5585 aputs ("\t.LEVEL 1.1\n");
5586 else
5587 aputs ("\t.LEVEL 1.0\n");
5590 static inline void
5591 pa_file_start_space (int sortspace)
5593 aputs ("\t.SPACE $PRIVATE$");
5594 if (sortspace)
5595 aputs (",SORT=16");
5596 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5597 if (flag_tm)
5598 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5599 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5600 "\n\t.SPACE $TEXT$");
5601 if (sortspace)
5602 aputs (",SORT=8");
5603 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5604 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5607 static inline void
5608 pa_file_start_file (int want_version)
5610 if (write_symbols != NO_DEBUG)
5612 output_file_directive (asm_out_file, main_input_filename);
5613 if (want_version)
5614 aputs ("\t.version\t\"01.01\"\n");
5618 static inline void
5619 pa_file_start_mcount (const char *aswhat)
5621 if (profile_flag)
5622 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5625 static void
5626 pa_elf_file_start (void)
5628 pa_file_start_level ();
5629 pa_file_start_mcount ("ENTRY");
5630 pa_file_start_file (0);
5633 static void
5634 pa_som_file_start (void)
5636 pa_file_start_level ();
5637 pa_file_start_space (0);
5638 aputs ("\t.IMPORT $global$,DATA\n"
5639 "\t.IMPORT $$dyncall,MILLICODE\n");
5640 pa_file_start_mcount ("CODE");
5641 pa_file_start_file (0);
5644 static void
5645 pa_linux_file_start (void)
5647 pa_file_start_file (1);
5648 pa_file_start_level ();
5649 pa_file_start_mcount ("CODE");
5652 static void
5653 pa_hpux64_gas_file_start (void)
5655 pa_file_start_level ();
5656 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5657 if (profile_flag)
5658 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5659 #endif
5660 pa_file_start_file (1);
5663 static void
5664 pa_hpux64_hpas_file_start (void)
5666 pa_file_start_level ();
5667 pa_file_start_space (1);
5668 pa_file_start_mcount ("CODE");
5669 pa_file_start_file (0);
5671 #undef aputs
5673 /* Search the deferred plabel list for SYMBOL and return its internal
5674 label. If an entry for SYMBOL is not found, a new entry is created. */
5676 rtx
5677 pa_get_deferred_plabel (rtx symbol)
5679 const char *fname = XSTR (symbol, 0);
5680 size_t i;
5682 /* See if we have already put this function on the list of deferred
5683 plabels. This list is generally small, so a linear search is not
5684 too ugly. If it proves too slow, replace it with something faster. */
5685 for (i = 0; i < n_deferred_plabels; i++)
5686 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5687 break;
5689 /* If the deferred plabel list is empty, or this entry was not found
5690 on the list, create a new entry on the list. */
5691 if (deferred_plabels == NULL || i == n_deferred_plabels)
5693 tree id;
5695 if (deferred_plabels == 0)
5696 deferred_plabels = ggc_alloc<deferred_plabel> ();
5697 else
5698 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5699 deferred_plabels,
5700 n_deferred_plabels + 1);
5702 i = n_deferred_plabels++;
5703 deferred_plabels[i].internal_label = gen_label_rtx ();
5704 deferred_plabels[i].symbol = symbol;
5706 /* Gross. We have just implicitly taken the address of this
5707 function. Mark it in the same manner as assemble_name. */
5708 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5709 if (id)
5710 mark_referenced (id);
5713 return deferred_plabels[i].internal_label;
5716 static void
5717 output_deferred_plabels (void)
5719 size_t i;
5721 /* If we have some deferred plabels, then we need to switch into the
5722 data or readonly data section, and align it to a 4 byte boundary
5723 before outputting the deferred plabels. */
5724 if (n_deferred_plabels)
5726 switch_to_section (flag_pic ? data_section : readonly_data_section);
5727 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5730 /* Now output the deferred plabels. */
5731 for (i = 0; i < n_deferred_plabels; i++)
5733 targetm.asm_out.internal_label (asm_out_file, "L",
5734 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5735 assemble_integer (deferred_plabels[i].symbol,
5736 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5740 /* Initialize optabs to point to emulation routines. */
5742 static void
5743 pa_init_libfuncs (void)
5745 if (HPUX_LONG_DOUBLE_LIBRARY)
5747 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5748 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5749 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5750 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5751 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5752 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5753 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5754 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5755 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5757 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5758 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5759 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5760 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5761 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5762 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5763 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5765 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5766 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5767 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5768 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5770 set_conv_libfunc (sfix_optab, SImode, TFmode,
5771 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5772 : "_U_Qfcnvfxt_quad_to_sgl");
5773 set_conv_libfunc (sfix_optab, DImode, TFmode,
5774 "_U_Qfcnvfxt_quad_to_dbl");
5775 set_conv_libfunc (ufix_optab, SImode, TFmode,
5776 "_U_Qfcnvfxt_quad_to_usgl");
5777 set_conv_libfunc (ufix_optab, DImode, TFmode,
5778 "_U_Qfcnvfxt_quad_to_udbl");
5780 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5781 "_U_Qfcnvxf_sgl_to_quad");
5782 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5783 "_U_Qfcnvxf_dbl_to_quad");
5784 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5785 "_U_Qfcnvxf_usgl_to_quad");
5786 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5787 "_U_Qfcnvxf_udbl_to_quad");
5790 if (TARGET_SYNC_LIBCALL)
5791 init_sync_libfuncs (8);
5794 /* HP's millicode routines mean something special to the assembler.
5795 Keep track of which ones we have used. */
5797 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5798 static void import_milli (enum millicodes);
5799 static char imported[(int) end1000];
5800 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5801 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5802 #define MILLI_START 10
5804 static void
5805 import_milli (enum millicodes code)
5807 char str[sizeof (import_string)];
5809 if (!imported[(int) code])
5811 imported[(int) code] = 1;
5812 strcpy (str, import_string);
5813 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5814 output_asm_insn (str, 0);
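/* A worked example of the splice above: MILLI_START (10) is the offset
   of the "...." field in IMPORT_STRING, so the first call of
   import_milli (mulI) emits

	.IMPORT $$mulI,MILLICODE

   and later calls for the same code emit nothing.  */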
5818 /* The register constraints have put the operands and return value in
5819 the proper registers. */
5821 const char *
5822 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5824 import_milli (mulI);
5825 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5828 /* Emit the rtl for doing a division by a constant. */
5830 /* Do magic division millicodes exist for this value? */
5831 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
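/* That is, magic division millicodes exist for the divisors 3, 5, 6,
   7, 9, 10, 12, 14 and 15.  */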
5833 /* We'll use an array to keep track of the magic millicodes and
5834 whether or not we've used them already. [n][0] is signed, [n][1] is
5835 unsigned. */
5837 static int div_milli[16][2];
5840 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5842 if (GET_CODE (operands[2]) == CONST_INT
5843 && INTVAL (operands[2]) > 0
5844 && INTVAL (operands[2]) < 16
5845 && pa_magic_milli[INTVAL (operands[2])])
5847 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5849 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5850 emit
5851 (gen_rtx_PARALLEL
5852 (VOIDmode,
5853 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5854 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5855 SImode,
5856 gen_rtx_REG (SImode, 26),
5857 operands[2])),
5858 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5859 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5860 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5861 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5862 gen_rtx_CLOBBER (VOIDmode, ret))));
5863 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5864 return 1;
5866 return 0;
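/* A sketch of the convention encoded above: the dividend is moved into
   %r26, the quotient appears in %r29, and %r25, %r26 and the millicode
   return pointer (%r31, or %r2 in the 64-bit runtime) are clobbered.
   The final move copies %r29 into operands[0].  */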
5869 const char *
5870 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5872 int divisor;
5874 /* If the divisor is a constant, try to use one of the special
5875 opcodes. */
5876 if (GET_CODE (operands[0]) == CONST_INT)
5878 static char buf[100];
5879 divisor = INTVAL (operands[0]);
5880 if (!div_milli[divisor][unsignedp])
5882 div_milli[divisor][unsignedp] = 1;
5883 if (unsignedp)
5884 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5885 else
5886 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5888 if (unsignedp)
5890 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5891 INTVAL (operands[0]));
5892 return pa_output_millicode_call (insn,
5893 gen_rtx_SYMBOL_REF (SImode, buf));
5895 else
5897 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5898 INTVAL (operands[0]));
5899 return pa_output_millicode_call (insn,
5900 gen_rtx_SYMBOL_REF (SImode, buf));
5903 /* Divisor isn't a special constant. */
5904 else
5906 if (unsignedp)
5908 import_milli (divU);
5909 return pa_output_millicode_call (insn,
5910 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5912 else
5914 import_milli (divI);
5915 return pa_output_millicode_call (insn,
5916 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5921 /* Output a $$rem millicode to do mod. */
5923 const char *
5924 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5926 if (unsignedp)
5928 import_milli (remU);
5929 return pa_output_millicode_call (insn,
5930 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5932 else
5934 import_milli (remI);
5935 return pa_output_millicode_call (insn,
5936 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5940 void
5941 pa_output_arg_descriptor (rtx_insn *call_insn)
5943 const char *arg_regs[4];
5944 machine_mode arg_mode;
5945 rtx link;
5946 int i, output_flag = 0;
5947 int regno;
5949 /* We neither need nor want argument location descriptors for the
5950 64-bit runtime environment or the ELF32 environment. */
5951 if (TARGET_64BIT || TARGET_ELF32)
5952 return;
5954 for (i = 0; i < 4; i++)
5955 arg_regs[i] = 0;
5957 /* Specify explicitly that no argument relocations should take place
5958 if using the portable runtime calling conventions. */
5959 if (TARGET_PORTABLE_RUNTIME)
5961 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5962 asm_out_file);
5963 return;
5966 gcc_assert (CALL_P (call_insn));
5967 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5968 link; link = XEXP (link, 1))
5970 rtx use = XEXP (link, 0);
5972 if (! (GET_CODE (use) == USE
5973 && GET_CODE (XEXP (use, 0)) == REG
5974 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5975 continue;
5977 arg_mode = GET_MODE (XEXP (use, 0));
5978 regno = REGNO (XEXP (use, 0));
5979 if (regno >= 23 && regno <= 26)
5981 arg_regs[26 - regno] = "GR";
5982 if (arg_mode == DImode)
5983 arg_regs[25 - regno] = "GR";
5985 else if (regno >= 32 && regno <= 39)
5987 if (arg_mode == SFmode)
5988 arg_regs[(regno - 32) / 2] = "FR";
5989 else
5991 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5992 arg_regs[(regno - 34) / 2] = "FR";
5993 arg_regs[(regno - 34) / 2 + 1] = "FU";
5994 #else
5995 arg_regs[(regno - 34) / 2] = "FU";
5996 arg_regs[(regno - 34) / 2 + 1] = "FR";
5997 #endif
6001 fputs ("\t.CALL ", asm_out_file);
6002 for (i = 0; i < 4; i++)
6004 if (arg_regs[i])
6006 if (output_flag++)
6007 fputc (',', asm_out_file);
6008 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6011 fputc ('\n', asm_out_file);
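/* For illustration (a hypothetical call, not captured output): a call
   passing two ints in %r26 and %r25 maps to ARGW0 and ARGW1 in the
   loop above and produces

	.CALL ARGW0=GR,ARGW1=GR  */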
6014 /* Inform reload about cases where moving X with a mode MODE to or from
6015 a register in RCLASS requires an extra scratch or immediate register.
6016 Return the class needed for the immediate register. */
6018 static reg_class_t
6019 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6020 machine_mode mode, secondary_reload_info *sri)
6022 int regno;
6023 enum reg_class rclass = (enum reg_class) rclass_i;
6025 /* Handle the easy stuff first. */
6026 if (rclass == R1_REGS)
6027 return NO_REGS;
6029 if (REG_P (x))
6031 regno = REGNO (x);
6032 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6033 return NO_REGS;
6035 else
6036 regno = -1;
6038 /* If we have something like (mem (mem (...)), we can safely assume the
6039 inner MEM will end up in a general register after reloading, so there's
6040 no need for a secondary reload. */
6041 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6042 return NO_REGS;
6044 /* Trying to load a constant into a FP register during PIC code
6045 generation requires %r1 as a scratch register. For float modes,
6046 the only legitimate constant is CONST0_RTX. However, there are
6047 a few patterns that accept constant double operands. */
6048 if (flag_pic
6049 && FP_REG_CLASS_P (rclass)
6050 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6052 switch (mode)
6054 case E_SImode:
6055 sri->icode = CODE_FOR_reload_insi_r1;
6056 break;
6058 case E_DImode:
6059 sri->icode = CODE_FOR_reload_indi_r1;
6060 break;
6062 case E_SFmode:
6063 sri->icode = CODE_FOR_reload_insf_r1;
6064 break;
6066 case E_DFmode:
6067 sri->icode = CODE_FOR_reload_indf_r1;
6068 break;
6070 default:
6071 gcc_unreachable ();
6073 return NO_REGS;
6076 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6077 register when we're generating PIC code or when the operand isn't
6078 readonly. */
6079 if (pa_symbolic_expression_p (x))
6081 if (GET_CODE (x) == HIGH)
6082 x = XEXP (x, 0);
6084 if (flag_pic || !read_only_operand (x, VOIDmode))
6086 switch (mode)
6088 case E_SImode:
6089 sri->icode = CODE_FOR_reload_insi_r1;
6090 break;
6092 case E_DImode:
6093 sri->icode = CODE_FOR_reload_indi_r1;
6094 break;
6096 default:
6097 gcc_unreachable ();
6099 return NO_REGS;
6103 /* Profiling showed the PA port spends about 1.3% of its compilation
6104 time in true_regnum from calls inside pa_secondary_reload_class. */
6105 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6106 regno = true_regnum (x);
6108 /* Handle reloads for floating point loads and stores. */
6109 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6110 && FP_REG_CLASS_P (rclass))
6112 if (MEM_P (x))
6114 x = XEXP (x, 0);
6116 /* We don't need a secondary reload for indexed memory addresses.
6118 When INT14_OK_STRICT is true, it might appear that we could
6119 directly allow register indirect memory addresses. However,
6120 this doesn't work because we don't support SUBREGs in
6121 floating-point register copies and reload doesn't tell us
6122 when it's going to use a SUBREG. */
6123 if (IS_INDEX_ADDR_P (x))
6124 return NO_REGS;
6127 /* Request a secondary reload with a general scratch register
6128 for everything else. ??? Could symbolic operands be handled
6129 directly when generating non-pic PA 2.0 code? */
6130 sri->icode = (in_p
6131 ? direct_optab_handler (reload_in_optab, mode)
6132 : direct_optab_handler (reload_out_optab, mode));
6133 return NO_REGS;
6136 /* A SAR<->FP register copy requires an intermediate general register
6137 and secondary memory. We need a secondary reload with a general
6138 scratch register for spills. */
6139 if (rclass == SHIFT_REGS)
6141 /* Handle spill. */
6142 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6144 sri->icode = (in_p
6145 ? direct_optab_handler (reload_in_optab, mode)
6146 : direct_optab_handler (reload_out_optab, mode));
6147 return NO_REGS;
6150 /* Handle FP copy. */
6151 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6152 return GENERAL_REGS;
6155 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6156 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6157 && FP_REG_CLASS_P (rclass))
6158 return GENERAL_REGS;
6160 return NO_REGS;
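/* Example of the SHIFT_REGS handling above: a copy between SAR and a
   floating-point register returns GENERAL_REGS, asking reload for an
   intermediate general register; the copy itself also goes through
   memory (see pa_secondary_memory_needed below).  */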
6163 /* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
6165 static bool
6166 pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED,
6167 reg_class_t class1 ATTRIBUTE_UNUSED,
6168 reg_class_t class2 ATTRIBUTE_UNUSED)
6170 #ifdef PA_SECONDARY_MEMORY_NEEDED
6171 return PA_SECONDARY_MEMORY_NEEDED (mode, class1, class2);
6172 #else
6173 return false;
6174 #endif
6177 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6178 is only marked as live on entry by df-scan when it is a fixed
6179 register. It isn't a fixed register in the 64-bit runtime,
6180 so we need to mark it here. */
6182 static void
6183 pa_extra_live_on_entry (bitmap regs)
6185 if (TARGET_64BIT)
6186 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6189 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6190 to prevent it from being deleted. */
6193 pa_eh_return_handler_rtx (void)
6195 rtx tmp;
6197 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6198 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6199 tmp = gen_rtx_MEM (word_mode, tmp);
6200 tmp->volatil = 1;
6201 return tmp;
6204 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6205 by invisible reference. As a GCC extension, we also pass anything
6206 with a zero or variable size by reference.
6208 The 64-bit runtime does not describe passing any types by invisible
6209 reference. The internals of GCC can't currently handle passing
6210 empty structures, and zero or variable length arrays when they are
6211 not passed entirely on the stack or by reference. Thus, as a GCC
6212 extension, we pass these types by reference. The HP compiler doesn't
6213 support these types, so hopefully there shouldn't be any compatibility
6214 issues. This may have to be revisited when HP releases a C99 compiler
6215 or updates the ABI. */
6217 static bool
6218 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6219 machine_mode mode, const_tree type,
6220 bool named ATTRIBUTE_UNUSED)
6222 HOST_WIDE_INT size;
6224 if (type)
6225 size = int_size_in_bytes (type);
6226 else
6227 size = GET_MODE_SIZE (mode);
6229 if (TARGET_64BIT)
6230 return size <= 0;
6231 else
6232 return size <= 0 || size > 8;
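/* Worked examples of the rules above: a 12-byte struct is passed by
   reference in the 32-bit runtime (size > 8) but by value in the
   64-bit runtime; a zero-sized struct is passed by reference in both
   runtimes.  */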
6235 /* Implement TARGET_FUNCTION_ARG_PADDING. */
6237 static pad_direction
6238 pa_function_arg_padding (machine_mode mode, const_tree type)
6240 if (mode == BLKmode
6241 || (TARGET_64BIT
6242 && type
6243 && (AGGREGATE_TYPE_P (type)
6244 || TREE_CODE (type) == COMPLEX_TYPE
6245 || TREE_CODE (type) == VECTOR_TYPE)))
6247 /* Return PAD_NONE if justification is not required. */
6248 if (type
6249 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6250 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6251 return PAD_NONE;
6253 /* The directions set here are ignored when a BLKmode argument larger
6254 than a word is placed in a register. Different code is used for
6255 the stack and registers. This makes it difficult to have a
6256 consistent data representation for both the stack and registers.
6257 For both runtimes, the justification and padding for arguments on
6258 the stack and in registers should be identical. */
6259 if (TARGET_64BIT)
6260 /* The 64-bit runtime specifies left justification for aggregates. */
6261 return PAD_UPWARD;
6262 else
6263 /* The 32-bit runtime architecture specifies right justification.
6264 When the argument is passed on the stack, the argument is padded
6265 with garbage on the left. The HP compiler pads with zeros. */
6266 return PAD_DOWNWARD;
6269 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6270 return PAD_DOWNWARD;
6271 else
6272 return PAD_NONE;
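/* Worked examples, assuming the usual 32/64-bit PARM_BOUNDARY values:
   a 3-byte BLKmode aggregate is padded downward (right justified) in
   the 32-bit runtime and upward in the 64-bit runtime; an HImode
   scalar is narrower than PARM_BOUNDARY and is padded downward in
   both.  */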
6276 /* Do what is necessary for `va_start'. We look at the current function
6277 to determine if stdargs or varargs is used and fill in an initial
6278 va_list. A pointer to this constructor is returned. */
6280 static rtx
6281 hppa_builtin_saveregs (void)
6283 rtx offset, dest;
6284 tree fntype = TREE_TYPE (current_function_decl);
6285 int argadj = ((!stdarg_p (fntype))
6286 ? UNITS_PER_WORD : 0);
6288 if (argadj)
6289 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6290 else
6291 offset = crtl->args.arg_offset_rtx;
6293 if (TARGET_64BIT)
6295 int i, off;
6297 /* Adjust for varargs/stdarg differences. */
6298 if (argadj)
6299 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6300 else
6301 offset = crtl->args.arg_offset_rtx;
6303 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6304 from the incoming arg pointer and growing to larger addresses. */
6305 for (i = 26, off = -64; i >= 19; i--, off += 8)
6306 emit_move_insn (gen_rtx_MEM (word_mode,
6307 plus_constant (Pmode,
6308 arg_pointer_rtx, off)),
6309 gen_rtx_REG (word_mode, i));
6311 /* The incoming args pointer points just beyond the flushback area;
6312 normally this is not a serious concern. However, when we are doing
6313 varargs/stdargs we want to make the arg pointer point to the start
6314 of the incoming argument area. */
6315 emit_move_insn (virtual_incoming_args_rtx,
6316 plus_constant (Pmode, arg_pointer_rtx, -64));
6318 /* Now return a pointer to the first anonymous argument. */
6319 return copy_to_reg (expand_binop (Pmode, add_optab,
6320 virtual_incoming_args_rtx,
6321 offset, 0, 0, OPTAB_LIB_WIDEN));
6324 /* Store general registers on the stack. */
6325 dest = gen_rtx_MEM (BLKmode,
6326 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6327 -16));
6328 set_mem_alias_set (dest, get_varargs_alias_set ());
6329 set_mem_align (dest, BITS_PER_WORD);
6330 move_block_from_reg (23, dest, 4);
6332 /* move_block_from_reg will emit code to store the argument registers
6333 individually as scalar stores.
6335 However, other insns may later load from the same addresses for
6336 a structure load (passing a struct to a varargs routine).
6338 The alias code assumes that such aliasing can never happen, so we
6339 have to keep memory referencing insns from moving up beyond the
6340 last argument register store. So we emit a blockage insn here. */
6341 emit_insn (gen_blockage ());
6343 return copy_to_reg (expand_binop (Pmode, add_optab,
6344 crtl->args.internal_arg_pointer,
6345 offset, 0, 0, OPTAB_LIB_WIDEN));
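/* In the 32-bit path above, move_block_from_reg (23, dest, 4) stores
   %r23 at internal_arg_pointer - 16 through %r26 at - 4, so argument
   word 0 (%r26) lands closest to the argument pointer and anonymous
   arguments continue contiguously into any words the caller passed on
   the stack.  */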
6348 static void
6349 hppa_va_start (tree valist, rtx nextarg)
6351 nextarg = expand_builtin_saveregs ();
6352 std_expand_builtin_va_start (valist, nextarg);
6355 static tree
6356 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6357 gimple_seq *post_p)
6359 if (TARGET_64BIT)
6361 /* Args grow upward. We can use the generic routines. */
6362 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6364 else /* !TARGET_64BIT */
6366 tree ptr = build_pointer_type (type);
6367 tree valist_type;
6368 tree t, u;
6369 unsigned int size, ofs;
6370 bool indirect;
6372 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6373 if (indirect)
6375 type = ptr;
6376 ptr = build_pointer_type (type);
6378 size = int_size_in_bytes (type);
6379 valist_type = TREE_TYPE (valist);
6381 /* Args grow down. Not handled by generic routines. */
6383 u = fold_convert (sizetype, size_in_bytes (type));
6384 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6385 t = fold_build_pointer_plus (valist, u);
6387 /* Align to 4 or 8 byte boundary depending on argument size. */
6389 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6390 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6391 t = fold_convert (valist_type, t);
6393 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6395 ofs = (8 - size) % 4;
6396 if (ofs != 0)
6397 t = fold_build_pointer_plus_hwi (t, ofs);
6399 t = fold_convert (ptr, t);
6400 t = build_va_arg_indirect_ref (t);
6402 if (indirect)
6403 t = build_va_arg_indirect_ref (t);
6405 return t;
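/* A worked example of the 32-bit pointer arithmetic above for a
   2-byte short: valist -= 2; valist &= -4 (4-byte alignment, since
   size <= 4); then ofs = (8 - 2) % 4 = 2, so the final pointer is
   advanced 2 bytes into the slot, to the right-justified value.  */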
6409 /* True if MODE is valid for the target. By "valid", we mean able to
6410 be manipulated in non-trivial ways. In particular, this means all
6411 the arithmetic is supported.
6413 Currently, TImode is not valid as the HP 64-bit runtime documentation
6414 doesn't document the alignment and calling conventions for this type.
6415 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6416 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6418 static bool
6419 pa_scalar_mode_supported_p (scalar_mode mode)
6421 int precision = GET_MODE_PRECISION (mode);
6423 switch (GET_MODE_CLASS (mode))
6425 case MODE_PARTIAL_INT:
6426 case MODE_INT:
6427 if (precision == CHAR_TYPE_SIZE)
6428 return true;
6429 if (precision == SHORT_TYPE_SIZE)
6430 return true;
6431 if (precision == INT_TYPE_SIZE)
6432 return true;
6433 if (precision == LONG_TYPE_SIZE)
6434 return true;
6435 if (precision == LONG_LONG_TYPE_SIZE)
6436 return true;
6437 return false;
6439 case MODE_FLOAT:
6440 if (precision == FLOAT_TYPE_SIZE)
6441 return true;
6442 if (precision == DOUBLE_TYPE_SIZE)
6443 return true;
6444 if (precision == LONG_DOUBLE_TYPE_SIZE)
6445 return true;
6446 return false;
6448 case MODE_DECIMAL_FLOAT:
6449 return false;
6451 default:
6452 gcc_unreachable ();
6456 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6457 it branches into the delay slot. Otherwise, return FALSE. */
6459 static bool
6460 branch_to_delay_slot_p (rtx_insn *insn)
6462 rtx_insn *jump_insn;
6464 if (dbr_sequence_length ())
6465 return FALSE;
6467 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6468 while (insn)
6470 insn = next_active_insn (insn);
6471 if (jump_insn == insn)
6472 return TRUE;
6474 /* We can't rely on the length of asms. So, we return FALSE when
6475 the branch is followed by an asm. */
6476 if (!insn
6477 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6478 || asm_noperands (PATTERN (insn)) >= 0
6479 || get_attr_length (insn) > 0)
6480 break;
6483 return FALSE;
6486 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6488 This occurs when INSN has an unfilled delay slot and is followed
6489 by an asm. Disaster can occur if the asm is empty and the jump
6490 branches into the delay slot. So, we add a nop in the delay slot
6491 when this occurs. */
6493 static bool
6494 branch_needs_nop_p (rtx_insn *insn)
6496 rtx_insn *jump_insn;
6498 if (dbr_sequence_length ())
6499 return FALSE;
6501 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6502 while (insn)
6504 insn = next_active_insn (insn);
6505 if (!insn || jump_insn == insn)
6506 return TRUE;
6508 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6509 || asm_noperands (PATTERN (insn)) >= 0)
6510 && get_attr_length (insn) > 0)
6511 break;
6514 return FALSE;
6517 /* Return TRUE if INSN, a forward jump insn, can use nullification
6518 to skip the following instruction. This avoids an extra cycle due
6519 to a mis-predicted branch when we fall through. */
6521 static bool
6522 use_skip_p (rtx_insn *insn)
6524 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6526 while (insn)
6528 insn = next_active_insn (insn);
6530 /* We can't rely on the length of asms, so we can't skip asms. */
6531 if (!insn
6532 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6533 || asm_noperands (PATTERN (insn)) >= 0)
6534 break;
6535 if (get_attr_length (insn) == 4
6536 && jump_insn == next_active_insn (insn))
6537 return TRUE;
6538 if (get_attr_length (insn) > 0)
6539 break;
6542 return FALSE;
6545 /* This routine handles all the normal conditional branch sequences we
6546 might need to generate. It handles compare immediate vs compare
6547 register, nullification of delay slots, varying length branches,
6548 negated branches, and all combinations of the above. It returns the
6549 output appropriate to emit the branch corresponding to all given
6550 parameters. */
6552 const char *
6553 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6555 static char buf[100];
6556 bool useskip;
6557 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6558 int length = get_attr_length (insn);
6559 int xdelay;
6561 /* A conditional branch to the following instruction (e.g. the delay slot)
6562 is asking for a disaster. This can happen when not optimizing and
6563 when jump optimization fails.
6565 While it is usually safe to emit nothing, this can fail if the
6566 preceding instruction is a nullified branch with an empty delay
6567 slot and the same branch target as this branch. We could check
6568 for this but jump optimization should eliminate nop jumps. It
6569 is always safe to emit a nop. */
6570 if (branch_to_delay_slot_p (insn))
6571 return "nop";
6573 /* The doubleword form of the cmpib instruction doesn't have the LEU
6574 and GTU conditions while the cmpb instruction does. Since we accept
6575 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6576 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6577 operands[2] = gen_rtx_REG (DImode, 0);
6578 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6579 operands[1] = gen_rtx_REG (DImode, 0);
6581 /* If this is a long branch with its delay slot unfilled, set `nullify'
6582 as it can nullify the delay slot and save a nop. */
6583 if (length == 8 && dbr_sequence_length () == 0)
6584 nullify = 1;
6586 /* If this is a short forward conditional branch which did not get
6587 its delay slot filled, the delay slot can still be nullified. */
6588 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6589 nullify = forward_branch_p (insn);
6591 /* A forward branch over a single nullified insn can be done with a
6592 comclr instruction. This avoids a single cycle penalty due to
6593 mis-predicted branch if we fall through (branch not taken). */
6594 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6596 switch (length)
6598 /* All short conditional branches except backwards with an unfilled
6599 delay slot. */
6600 case 4:
6601 if (useskip)
6602 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6603 else
6604 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6605 if (GET_MODE (operands[1]) == DImode)
6606 strcat (buf, "*");
6607 if (negated)
6608 strcat (buf, "%B3");
6609 else
6610 strcat (buf, "%S3");
6611 if (useskip)
6612 strcat (buf, " %2,%r1,%%r0");
6613 else if (nullify)
6615 if (branch_needs_nop_p (insn))
6616 strcat (buf, ",n %2,%r1,%0%#");
6617 else
6618 strcat (buf, ",n %2,%r1,%0");
6620 else
6621 strcat (buf, " %2,%r1,%0");
6622 break;
6624 /* All long conditionals. Note a short backward branch with an
6625 unfilled delay slot is treated just like a long backward branch
6626 with an unfilled delay slot. */
6627 case 8:
6628 /* Handle weird backwards branch with a filled delay slot
6629 which is nullified. */
6630 if (dbr_sequence_length () != 0
6631 && ! forward_branch_p (insn)
6632 && nullify)
6634 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6635 if (GET_MODE (operands[1]) == DImode)
6636 strcat (buf, "*");
6637 if (negated)
6638 strcat (buf, "%S3");
6639 else
6640 strcat (buf, "%B3");
6641 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6643 /* Handle short backwards branch with an unfilled delay slot.
6644 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6645 taken and untaken branches. */
6646 else if (dbr_sequence_length () == 0
6647 && ! forward_branch_p (insn)
6648 && INSN_ADDRESSES_SET_P ()
6649 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6650 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6652 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6653 if (GET_MODE (operands[1]) == DImode)
6654 strcat (buf, "*");
6655 if (negated)
6656 strcat (buf, "%B3 %2,%r1,%0%#");
6657 else
6658 strcat (buf, "%S3 %2,%r1,%0%#");
6660 else
6662 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6663 if (GET_MODE (operands[1]) == DImode)
6664 strcat (buf, "*");
6665 if (negated)
6666 strcat (buf, "%S3");
6667 else
6668 strcat (buf, "%B3");
6669 if (nullify)
6670 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6671 else
6672 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6674 break;
6676 default:
6677 /* The reversed conditional branch must branch over one additional
6678 instruction if the delay slot is filled and needs to be extracted
6679 by pa_output_lbranch. If the delay slot is empty or this is a
6680 nullified forward branch, the instruction after the reversed
6681 condition branch must be nullified. */
6682 if (dbr_sequence_length () == 0
6683 || (nullify && forward_branch_p (insn)))
6685 nullify = 1;
6686 xdelay = 0;
6687 operands[4] = GEN_INT (length);
6689 else
6691 xdelay = 1;
6692 operands[4] = GEN_INT (length + 4);
6695 /* Create a reversed conditional branch which branches around
6696 the following insns. */
6697 if (GET_MODE (operands[1]) != DImode)
6699 if (nullify)
6701 if (negated)
6702 strcpy (buf,
6703 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6704 else
6705 strcpy (buf,
6706 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6708 else
6710 if (negated)
6711 strcpy (buf,
6712 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6713 else
6714 strcpy (buf,
6715 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6718 else
6720 if (nullify)
6722 if (negated)
6723 strcpy (buf,
6724 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6725 else
6726 strcpy (buf,
6727 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6729 else
6731 if (negated)
6732 strcpy (buf,
6733 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6734 else
6735 strcpy (buf,
6736 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6740 output_asm_insn (buf, operands);
6741 return pa_output_lbranch (operands[0], insn, xdelay);
6743 return buf;
6746 /* Output a PIC pc-relative instruction sequence to load the address of
6747 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6748 or a code label. OPERANDS[1] specifies the register to use to load
6749 the program counter. OPERANDS[3] may be used for label generation.
6750 The sequence is always three instructions in length. The program
6751 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6752 Register %r1 is clobbered. */
6754 static void
6755 pa_output_pic_pcrel_sequence (rtx *operands)
6757 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6758 if (TARGET_PA_20)
6760 /* We can use mfia to determine the current program counter. */
6761 if (TARGET_SOM || !TARGET_GAS)
6763 operands[3] = gen_label_rtx ();
6764 targetm.asm_out.internal_label (asm_out_file, "L",
6765 CODE_LABEL_NUMBER (operands[3]));
6766 output_asm_insn ("mfia %1", operands);
6767 output_asm_insn ("addil L'%0-%l3,%1", operands);
6768 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6770 else
6772 output_asm_insn ("mfia %1", operands);
6773 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6774 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6777 else
6779 /* We need to use a branch to determine the current program counter. */
6780 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6781 if (TARGET_SOM || !TARGET_GAS)
6783 operands[3] = gen_label_rtx ();
6784 output_asm_insn ("addil L'%0-%l3,%1", operands);
6785 targetm.asm_out.internal_label (asm_out_file, "L",
6786 CODE_LABEL_NUMBER (operands[3]));
6787 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6789 else
6791 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6792 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
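/* For illustration, the PA 2.0 GAS case above emits a sequence along
   these lines (the register choices are hypothetical; they come from
   OPERANDS[1] and OPERANDS[2]):

	mfia %r1
	addil L'sym-$PIC_pcrel$0+12,%r1
	ldo R'sym-$PIC_pcrel$0+16(%r1),%r2

   The +12/+16 constants offset the sampled program counter to the
   reference point of the relocation.  */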
6797 /* This routine handles output of long unconditional branches that
6798 exceed the maximum range of a simple branch instruction. Since
6799 we don't have a register available for the branch, we save register
6800 %r1 in the frame marker, load the branch destination DEST into %r1,
6801 execute the branch, and restore %r1 in the delay slot of the branch.
6803 Since long branches may have an insn in the delay slot and the
6804 delay slot is used to restore %r1, we in general need to extract
6805 this insn and execute it before the branch. However, to facilitate
6806 use of this function by conditional branches, we also provide an
6807 option to not extract the delay insn so that it will be emitted
6808 after the long branch. So, if there is an insn in the delay slot,
6809 it is extracted if XDELAY is nonzero.
6811 The lengths of the various long-branch sequences are 20, 16 and 24
6812 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6814 const char *
6815 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6817 rtx xoperands[4];
6819 xoperands[0] = dest;
6821 /* First, free up the delay slot. */
6822 if (xdelay && dbr_sequence_length () != 0)
6824 /* We can't handle a jump in the delay slot. */
6825 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6827 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6828 optimize, 0, NULL);
6830 /* Now delete the delay insn. */
6831 SET_INSN_DELETED (NEXT_INSN (insn));
6834 /* Output an insn to save %r1. The runtime documentation doesn't
6835 specify whether the "Clean Up" slot in the caller's frame can
6836 be clobbered by the callee. It isn't copied by HP's builtin
6837 alloca, so this suggests that it can be clobbered if necessary.
6838 The "Static Link" location is copied by HP builtin alloca, so
6839 we avoid using it. Using the cleanup slot might be a problem
6840 if we have to interoperate with languages that pass cleanup
6841 information. However, it should be possible to handle these
6842 situations with GCC's asm feature.
6844 The "Current RP" slot is reserved for the called procedure, so
6845 we try to use it when we don't have a frame of our own. It's
6846 rather unlikely that we won't have a frame when we need to emit
6847 a very long branch.
6849 Really the way to go long term is a register scavenger; go to
6850 the target of the jump and find a register which we can use
6851 as a scratch to hold the value in %r1. Then, we wouldn't have
6852 to free up the delay slot or clobber a slot that may be needed
6853 for other purposes. */
6854 if (TARGET_64BIT)
6856 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6857 /* Use the return pointer slot in the frame marker. */
6858 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6859 else
6860 /* Use the slot at -40 in the frame marker since HP builtin
6861 alloca doesn't copy it. */
6862 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6864 else
6866 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6867 /* Use the return pointer slot in the frame marker. */
6868 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6869 else
6870 /* Use the "Clean Up" slot in the frame marker. In GCC,
6871 the only other use of this location is for copying a
6872 floating point double argument from a floating-point
6873 register to two general registers. The copy is done
6874 as an "atomic" operation when outputting a call, so it
6875 won't interfere with our using the location here. */
6876 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6879 if (TARGET_PORTABLE_RUNTIME)
6881 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6882 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6883 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6885 else if (flag_pic)
6887 xoperands[1] = gen_rtx_REG (Pmode, 1);
6888 xoperands[2] = xoperands[1];
6889 pa_output_pic_pcrel_sequence (xoperands);
6890 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6892 else
6893 /* Now output a very long branch to the original target. */
6894 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6896 /* Now restore the value of %r1 in the delay slot. */
6897 if (TARGET_64BIT)
6899 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6900 return "ldd -16(%%r30),%%r1";
6901 else
6902 return "ldd -40(%%r30),%%r1";
6904 else
6906 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6907 return "ldw -20(%%r30),%%r1";
6908 else
6909 return "ldw -12(%%r30),%%r1";
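/* A minimal sketch of the 32-bit non-PIC case above, for a function
   with no frame and a hypothetical target L$dest:

	stw %r1,-20(%r30)
	ldil L'L$dest,%r1
	be R'L$dest(%sr4,%r1)
	ldw -20(%r30),%r1

   with the final load, returned above, filling the delay slot; this
   is the 16-byte non-PIC length quoted in the header comment.  */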
6913 /* This routine handles all the branch-on-bit conditional branch sequences we
6914 might need to generate. It handles nullification of delay slots,
6915 varying length branches, negated branches and all combinations of the
6916 above. It returns the appropriate output template to emit the branch. */
6918 const char *
6919 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6921 static char buf[100];
6922 bool useskip;
6923 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6924 int length = get_attr_length (insn);
6925 int xdelay;
6927 /* A conditional branch to the following instruction (e.g. the delay slot) is
6928 asking for a disaster. I do not think this can happen as this pattern
6929 is only used when optimizing; jump optimization should eliminate the
6930 jump. But be prepared just in case. */
6932 if (branch_to_delay_slot_p (insn))
6933 return "nop";
6935 /* If this is a long branch with its delay slot unfilled, set `nullify'
6936 as it can nullify the delay slot and save a nop. */
6937 if (length == 8 && dbr_sequence_length () == 0)
6938 nullify = 1;
6940 /* If this is a short forward conditional branch which did not get
6941 its delay slot filled, the delay slot can still be nullified. */
6942 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6943 nullify = forward_branch_p (insn);
6945 /* A forward branch over a single nullified insn can be done with an
6946 extrs instruction. This avoids a single cycle penalty due to
6947 mis-predicted branch if we fall through (branch not taken). */
6948 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6950 switch (length)
6953 /* All short conditional branches except backwards with an unfilled
6954 delay slot. */
6955 case 4:
6956 if (useskip)
6957 strcpy (buf, "{extrs,|extrw,s,}");
6958 else
6959 strcpy (buf, "bb,");
6960 if (useskip && GET_MODE (operands[0]) == DImode)
6961 strcpy (buf, "extrd,s,*");
6962 else if (GET_MODE (operands[0]) == DImode)
6963 strcpy (buf, "bb,*");
6964 if ((which == 0 && negated)
6965 || (which == 1 && ! negated))
6966 strcat (buf, ">=");
6967 else
6968 strcat (buf, "<");
6969 if (useskip)
6970 strcat (buf, " %0,%1,1,%%r0");
6971 else if (nullify && negated)
6973 if (branch_needs_nop_p (insn))
6974 strcat (buf, ",n %0,%1,%3%#");
6975 else
6976 strcat (buf, ",n %0,%1,%3");
6978 else if (nullify && ! negated)
6980 if (branch_needs_nop_p (insn))
6981 strcat (buf, ",n %0,%1,%2%#");
6982 else
6983 strcat (buf, ",n %0,%1,%2");
6985 else if (! nullify && negated)
6986 strcat (buf, " %0,%1,%3");
6987 else if (! nullify && ! negated)
6988 strcat (buf, " %0,%1,%2");
6989 break;
6991 /* All long conditionals. Note a short backward branch with an
6992 unfilled delay slot is treated just like a long backward branch
6993 with an unfilled delay slot. */
6994 case 8:
6995 /* Handle weird backwards branch with a filled delay slot
6996 which is nullified. */
6997 if (dbr_sequence_length () != 0
6998 && ! forward_branch_p (insn)
6999 && nullify)
7001 strcpy (buf, "bb,");
7002 if (GET_MODE (operands[0]) == DImode)
7003 strcat (buf, "*");
7004 if ((which == 0 && negated)
7005 || (which == 1 && ! negated))
7006 strcat (buf, "<");
7007 else
7008 strcat (buf, ">=");
7009 if (negated)
7010 strcat (buf, ",n %0,%1,.+12\n\tb %3");
7011 else
7012 strcat (buf, ",n %0,%1,.+12\n\tb %2");
7014 /* Handle short backwards branch with an unfilled delay slot.
7015 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7016 taken and untaken branches. */
7017 else if (dbr_sequence_length () == 0
7018 && ! forward_branch_p (insn)
7019 && INSN_ADDRESSES_SET_P ()
7020 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7021 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7023 strcpy (buf, "bb,");
7024 if (GET_MODE (operands[0]) == DImode)
7025 strcat (buf, "*");
7026 if ((which == 0 && negated)
7027 || (which == 1 && ! negated))
7028 strcat (buf, ">=");
7029 else
7030 strcat (buf, "<");
7031 if (negated)
7032 strcat (buf, " %0,%1,%3%#");
7033 else
7034 strcat (buf, " %0,%1,%2%#");
7036 else
7038 if (GET_MODE (operands[0]) == DImode)
7039 strcpy (buf, "extrd,s,*");
7040 else
7041 strcpy (buf, "{extrs,|extrw,s,}");
7042 if ((which == 0 && negated)
7043 || (which == 1 && ! negated))
7044 strcat (buf, "<");
7045 else
7046 strcat (buf, ">=");
7047 if (nullify && negated)
7048 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7049 else if (nullify && ! negated)
7050 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7051 else if (negated)
7052 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7053 else
7054 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7056 break;
7058 default:
7059 /* The reversed conditional branch must branch over one additional
7060 instruction if the delay slot is filled and needs to be extracted
7061 by pa_output_lbranch. If the delay slot is empty or this is a
7062 nullified forward branch, the instruction after the reversed
7063 condition branch must be nullified. */
7064 if (dbr_sequence_length () == 0
7065 || (nullify && forward_branch_p (insn)))
7067 nullify = 1;
7068 xdelay = 0;
7069 operands[4] = GEN_INT (length);
7071 else
7073 xdelay = 1;
7074 operands[4] = GEN_INT (length + 4);
7077 if (GET_MODE (operands[0]) == DImode)
7078 strcpy (buf, "bb,*");
7079 else
7080 strcpy (buf, "bb,");
7081 if ((which == 0 && negated)
7082 || (which == 1 && !negated))
7083 strcat (buf, "<");
7084 else
7085 strcat (buf, ">=");
7086 if (nullify)
7087 strcat (buf, ",n %0,%1,.+%4");
7088 else
7089 strcat (buf, " %0,%1,.+%4");
7090 output_asm_insn (buf, operands);
7091 return pa_output_lbranch (negated ? operands[3] : operands[2],
7092 insn, xdelay);
7094 return buf;
7097 /* This routine handles all the branch-on-variable-bit conditional branch
7098 sequences we might need to generate. It handles nullification of delay
7099 slots, varying length branches, negated branches and all combinations
7100 of the above. It returns the appropriate output template to emit the
7101 branch. */
7103 const char *
7104 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7105 int which)
7107 static char buf[100];
7108 bool useskip;
7109 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7110 int length = get_attr_length (insn);
7111 int xdelay;
7113 /* A conditional branch to the following instruction (e.g. the delay slot) is
7114 asking for a disaster. I do not think this can happen as this pattern
7115 is only used when optimizing; jump optimization should eliminate the
7116 jump. But be prepared just in case. */
7118 if (branch_to_delay_slot_p (insn))
7119 return "nop";
7121 /* If this is a long branch with its delay slot unfilled, set `nullify'
7122 as it can nullify the delay slot and save a nop. */
7123 if (length == 8 && dbr_sequence_length () == 0)
7124 nullify = 1;
7126 /* If this is a short forward conditional branch which did not get
7127 its delay slot filled, the delay slot can still be nullified. */
7128 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7129 nullify = forward_branch_p (insn);
7131 /* A forward branch over a single nullified insn can be done with an
7132 extrs instruction. This avoids a single cycle penalty due to
7133 mis-predicted branch if we fall through (branch not taken). */
7134 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7136 switch (length)
7139 /* All short conditional branches except backwards with an unfilled
7140 delay slot. */
7141 case 4:
7142 if (useskip)
7143 strcpy (buf, "{vextrs,|extrw,s,}");
7144 else
7145 strcpy (buf, "{bvb,|bb,}");
7146 if (useskip && GET_MODE (operands[0]) == DImode)
7147 strcpy (buf, "extrd,s,*");
7148 else if (GET_MODE (operands[0]) == DImode)
7149 strcpy (buf, "bb,*");
7150 if ((which == 0 && negated)
7151 || (which == 1 && ! negated))
7152 strcat (buf, ">=");
7153 else
7154 strcat (buf, "<");
7155 if (useskip)
7156 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7157 else if (nullify && negated)
7159 if (branch_needs_nop_p (insn))
7160 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7161 else
7162 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7164 else if (nullify && ! negated)
7166 if (branch_needs_nop_p (insn))
7167 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7168 else
7169 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7171 else if (! nullify && negated)
7172 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7173 else if (! nullify && ! negated)
7174 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7175 break;
7177 /* All long conditionals. Note a short backward branch with an
7178 unfilled delay slot is treated just like a long backward branch
7179 with an unfilled delay slot. */
7180 case 8:
7181 /* Handle weird backwards branch with a filled delay slot
7182 which is nullified. */
7183 if (dbr_sequence_length () != 0
7184 && ! forward_branch_p (insn)
7185 && nullify)
7187 strcpy (buf, "{bvb,|bb,}");
7188 if (GET_MODE (operands[0]) == DImode)
7189 strcat (buf, "*");
7190 if ((which == 0 && negated)
7191 || (which == 1 && ! negated))
7192 strcat (buf, "<");
7193 else
7194 strcat (buf, ">=");
7195 if (negated)
7196 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7197 else
7198 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7200 /* Handle short backwards branch with an unfilled delay slot.
7201 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7202 taken and untaken branches. */
7203 else if (dbr_sequence_length () == 0
7204 && ! forward_branch_p (insn)
7205 && INSN_ADDRESSES_SET_P ()
7206 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7207 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7209 strcpy (buf, "{bvb,|bb,}");
7210 if (GET_MODE (operands[0]) == DImode)
7211 strcat (buf, "*");
7212 if ((which == 0 && negated)
7213 || (which == 1 && ! negated))
7214 strcat (buf, ">=");
7215 else
7216 strcat (buf, "<");
7217 if (negated)
7218 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7219 else
7220 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7222 else
7224 strcpy (buf, "{vextrs,|extrw,s,}");
7225 if (GET_MODE (operands[0]) == DImode)
7226 strcpy (buf, "extrd,s,*");
7227 if ((which == 0 && negated)
7228 || (which == 1 && ! negated))
7229 strcat (buf, "<");
7230 else
7231 strcat (buf, ">=");
7232 if (nullify && negated)
7233 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7234 else if (nullify && ! negated)
7235 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7236 else if (negated)
7237 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7238 else
7239 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7241 break;
7243 default:
7244 /* The reversed conditional branch must branch over one additional
7245 instruction if the delay slot is filled and needs to be extracted
7246 by pa_output_lbranch. If the delay slot is empty or this is a
7247 nullified forward branch, the instruction after the reversed
7248 condition branch must be nullified. */
7249 if (dbr_sequence_length () == 0
7250 || (nullify && forward_branch_p (insn)))
7252 nullify = 1;
7253 xdelay = 0;
7254 operands[4] = GEN_INT (length);
7256 else
7258 xdelay = 1;
7259 operands[4] = GEN_INT (length + 4);
7262 if (GET_MODE (operands[0]) == DImode)
7263 strcpy (buf, "bb,*");
7264 else
7265 strcpy (buf, "{bvb,|bb,}");
7266 if ((which == 0 && negated)
7267 || (which == 1 && !negated))
7268 strcat (buf, "<");
7269 else
7270 strcat (buf, ">=");
7271 if (nullify)
7272 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7273 else
7274 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7275 output_asm_insn (buf, operands);
7276 return pa_output_lbranch (negated ? operands[3] : operands[2],
7277 insn, xdelay);
7279 return buf;
7282 /* Return the output template for emitting a dbra type insn.
7284 Note it may perform some output operations on its own before
7285 returning the final output string. */
7286 const char *
7287 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7289 int length = get_attr_length (insn);
7291 /* A conditional branch to the following instruction (e.g. the delay slot) is
7292 asking for a disaster. Be prepared! */
7294 if (branch_to_delay_slot_p (insn))
7296 if (which_alternative == 0)
7297 return "ldo %1(%0),%0";
7298 else if (which_alternative == 1)
7300 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7301 output_asm_insn ("ldw -16(%%r30),%4", operands);
7302 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7303 return "{fldws|fldw} -16(%%r30),%0";
7305 else
7307 output_asm_insn ("ldw %0,%4", operands);
7308 return "ldo %1(%4),%4\n\tstw %4,%0";
7312 if (which_alternative == 0)
7314 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7315 int xdelay;
7317 /* If this is a long branch with its delay slot unfilled, set `nullify'
7318 as it can nullify the delay slot and save a nop. */
7319 if (length == 8 && dbr_sequence_length () == 0)
7320 nullify = 1;
7322 /* If this is a short forward conditional branch which did not get
7323 its delay slot filled, the delay slot can still be nullified. */
7324 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7325 nullify = forward_branch_p (insn);
7327 switch (length)
7329 case 4:
7330 if (nullify)
7332 if (branch_needs_nop_p (insn))
7333 return "addib,%C2,n %1,%0,%3%#";
7334 else
7335 return "addib,%C2,n %1,%0,%3";
7337 else
7338 return "addib,%C2 %1,%0,%3";
7340 case 8:
7341 /* Handle weird backwards branch with a filled delay slot
7342 which is nullified. */
7343 if (dbr_sequence_length () != 0
7344 && ! forward_branch_p (insn)
7345 && nullify)
7346 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7347 /* Handle short backwards branch with an unfilled delay slot.
7348 Using an addb;nop rather than addi;bl saves 1 cycle for both
7349 taken and untaken branches. */
7350 else if (dbr_sequence_length () == 0
7351 && ! forward_branch_p (insn)
7352 && INSN_ADDRESSES_SET_P ()
7353 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7354 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7355 return "addib,%C2 %1,%0,%3%#";
7357 /* Handle normal cases. */
7358 if (nullify)
7359 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7360 else
7361 return "addi,%N2 %1,%0,%0\n\tb %3";
7363 default:
7364 /* The reversed conditional branch must branch over one additional
7365 instruction if the delay slot is filled and needs to be extracted
7366 by pa_output_lbranch. If the delay slot is empty or this is a
7367 nullified forward branch, the instruction after the reversed
7368 condition branch must be nullified. */
7369 if (dbr_sequence_length () == 0
7370 || (nullify && forward_branch_p (insn)))
7372 nullify = 1;
7373 xdelay = 0;
7374 operands[4] = GEN_INT (length);
7376 else
7378 xdelay = 1;
7379 operands[4] = GEN_INT (length + 4);
7382 if (nullify)
7383 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7384 else
7385 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7387 return pa_output_lbranch (operands[3], insn, xdelay);
7391 /* Deal with gross reload from FP register case. */
7392 else if (which_alternative == 1)
7394 /* Move loop counter from FP register to MEM then into a GR,
7395 increment the GR, store the GR into MEM, and finally reload
7396 the FP register from MEM from within the branch's delay slot. */
7397 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7398 operands);
7399 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7400 if (length == 24)
7401 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7402 else if (length == 28)
7403 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7404 else
7406 operands[5] = GEN_INT (length - 16);
7407 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7408 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7409 return pa_output_lbranch (operands[3], insn, 0);
7412 /* Deal with gross reload from memory case. */
7413 else
7415 /* Reload loop counter from memory; the store back to memory
7416 happens in the branch's delay slot. */
7417 output_asm_insn ("ldw %0,%4", operands);
7418 if (length == 12)
7419 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7420 else if (length == 16)
7421 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7422 else
7424 operands[5] = GEN_INT (length - 4);
7425 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7426 return pa_output_lbranch (operands[3], insn, 0);
7431 /* Return the output template for emitting a movb type insn.
7433 Note it may perform some output operations on its own before
7434 returning the final output string. */
7435 const char *
7436 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7437 int reverse_comparison)
7439 int length = get_attr_length (insn);
7441 /* A conditional branch to the following instruction (e.g. the delay slot) is
7442 asking for a disaster. Be prepared! */
7444 if (branch_to_delay_slot_p (insn))
7446 if (which_alternative == 0)
7447 return "copy %1,%0";
7448 else if (which_alternative == 1)
7450 output_asm_insn ("stw %1,-16(%%r30)", operands);
7451 return "{fldws|fldw} -16(%%r30),%0";
7453 else if (which_alternative == 2)
7454 return "stw %1,%0";
7455 else
7456 return "mtsar %r1";
7459 /* Support the second variant. */
7460 if (reverse_comparison)
7461 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7463 if (which_alternative == 0)
7465 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7466 int xdelay;
7468 /* If this is a long branch with its delay slot unfilled, set `nullify'
7469 as it can nullify the delay slot and save a nop. */
7470 if (length == 8 && dbr_sequence_length () == 0)
7471 nullify = 1;
7473 /* If this is a short forward conditional branch which did not get
7474 its delay slot filled, the delay slot can still be nullified. */
7475 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7476 nullify = forward_branch_p (insn);
7478 switch (length)
7480 case 4:
7481 if (nullify)
7483 if (branch_needs_nop_p (insn))
7484 return "movb,%C2,n %1,%0,%3%#";
7485 else
7486 return "movb,%C2,n %1,%0,%3";
7488 else
7489 return "movb,%C2 %1,%0,%3";
7491 case 8:
7492 /* Handle weird backwards branch with a filled delay slot
7493 which is nullified. */
7494 if (dbr_sequence_length () != 0
7495 && ! forward_branch_p (insn)
7496 && nullify)
7497 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7499 /* Handle short backwards branch with an unfilled delay slot.
7500 Using a movb;nop rather than or;bl saves 1 cycle for both
7501 taken and untaken branches. */
7502 else if (dbr_sequence_length () == 0
7503 && ! forward_branch_p (insn)
7504 && INSN_ADDRESSES_SET_P ()
7505 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7506 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7507 return "movb,%C2 %1,%0,%3%#";
7508 /* Handle normal cases. */
7509 if (nullify)
7510 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7511 else
7512 return "or,%N2 %1,%%r0,%0\n\tb %3";
7514 default:
7515 /* The reversed conditional branch must branch over one additional
7516 instruction if the delay slot is filled and needs to be extracted
7517 by pa_output_lbranch. If the delay slot is empty or this is a
7518 nullified forward branch, the instruction after the reversed
7519 condition branch must be nullified. */
7520 if (dbr_sequence_length () == 0
7521 || (nullify && forward_branch_p (insn)))
7523 nullify = 1;
7524 xdelay = 0;
7525 operands[4] = GEN_INT (length);
7527 else
7529 xdelay = 1;
7530 operands[4] = GEN_INT (length + 4);
7533 if (nullify)
7534 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7535 else
7536 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7538 return pa_output_lbranch (operands[3], insn, xdelay);
7541 /* Deal with gross reload for FP destination register case. */
7542 else if (which_alternative == 1)
7544 /* Move source register to MEM, perform the branch test, then
7545 finally load the FP register from MEM from within the branch's
7546 delay slot. */
7547 output_asm_insn ("stw %1,-16(%%r30)", operands);
7548 if (length == 12)
7549 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7550 else if (length == 16)
7551 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7552 else
7554 operands[4] = GEN_INT (length - 4);
7555 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7556 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7557 return pa_output_lbranch (operands[3], insn, 0);
7560 /* Deal with gross reload from memory case. */
7561 else if (which_alternative == 2)
7563 /* Reload loop counter from memory; the store back to memory
7564 happens in the branch's delay slot. */
7565 if (length == 8)
7566 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7567 else if (length == 12)
7568 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7569 else
7571 operands[4] = GEN_INT (length);
7572 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7573 operands);
7574 return pa_output_lbranch (operands[3], insn, 0);
7577 /* Handle SAR as a destination. */
7578 else
7580 if (length == 8)
7581 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7582 else if (length == 12)
7583 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7584 else
7586 operands[4] = GEN_INT (length);
7587 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7588 operands);
7589 return pa_output_lbranch (operands[3], insn, 0);
7594 /* Copy any FP arguments in INSN into integer registers. */
7595 static void
7596 copy_fp_args (rtx_insn *insn)
7598 rtx link;
7599 rtx xoperands[2];
7601 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7603 int arg_mode, regno;
7604 rtx use = XEXP (link, 0);
7606 if (! (GET_CODE (use) == USE
7607 && GET_CODE (XEXP (use, 0)) == REG
7608 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7609 continue;
7611 arg_mode = GET_MODE (XEXP (use, 0));
7612 regno = REGNO (XEXP (use, 0));
7614 /* Is it a floating point register? */
7615 if (regno >= 32 && regno <= 39)
7617 /* Copy the FP register into an integer register via memory. */
7618 if (arg_mode == SFmode)
7620 xoperands[0] = XEXP (use, 0);
7621 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7622 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7623 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7625 else
7627 xoperands[0] = XEXP (use, 0);
7628 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7629 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7630 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7631 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7637 /* Compute length of the FP argument copy sequence for INSN. */
7638 static int
7639 length_fp_args (rtx_insn *insn)
7641 int length = 0;
7642 rtx link;
7644 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7646 int arg_mode, regno;
7647 rtx use = XEXP (link, 0);
7649 if (! (GET_CODE (use) == USE
7650 && GET_CODE (XEXP (use, 0)) == REG
7651 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7652 continue;
7654 arg_mode = GET_MODE (XEXP (use, 0));
7655 regno = REGNO (XEXP (use, 0));
7657 /* Is it a floating point register? */
7658 if (regno >= 32 && regno <= 39)
7660 if (arg_mode == SFmode)
7661 length += 8;
7662 else
7663 length += 12;
7667 return length;
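/* Editor's sketch (standalone C, compiled out; not part of pa.c): the 8
   and 12 byte counts above are just 4 bytes per PA instruction times the
   number of insns copy_fp_args emits, fstw + ldw for SFmode and
   fstd + ldw + ldw for DFmode.  */
#if 0
#include <stdio.h>

#define INSN_BYTES 4

int
main (void)
{
  printf ("SFmode copy: %d bytes\n", 2 * INSN_BYTES);  /* fstw + ldw       */
  printf ("DFmode copy: %d bytes\n", 3 * INSN_BYTES);  /* fstd + ldw + ldw */
  return 0;
}
#endif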
7670 /* Return the attribute length for the millicode call instruction INSN.
7671 The length must match the code generated by pa_output_millicode_call.
7672 We include the delay slot in the returned length as it is better to
7673 overestimate the length than to underestimate it. */
7675 int
7676 pa_attr_length_millicode_call (rtx_insn *insn)
7678 unsigned long distance = -1;
7679 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7681 if (INSN_ADDRESSES_SET_P ())
7683 distance = (total + insn_current_reference_address (insn));
7684 if (distance < total)
7685 distance = -1;
7688 if (TARGET_64BIT)
7690 if (!TARGET_LONG_CALLS && distance < 7600000)
7691 return 8;
7693 return 20;
7695 else if (TARGET_PORTABLE_RUNTIME)
7696 return 24;
7697 else
7699 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7700 return 8;
7702 if (!flag_pic)
7703 return 12;
7705 return 24;
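/* Editor's sketch (standalone C, compiled out; not part of pa.c): the
   "if (distance < total)" test above is the standard unsigned-overflow
   check; when the addition wraps, the sum is smaller than either operand
   and the code falls back to the conservative all-ones "unknown" distance.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long total = (unsigned long) -1 - 10;   /* near the wrap point */
  unsigned long addr = 100;
  unsigned long distance = total + addr;           /* wraps around */

  if (distance < total)                            /* overflow detected */
    distance = -1;

  printf ("%lu\n", distance);                      /* prints ULONG_MAX */
  return 0;
}
#endif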
7709 /* INSN is a function call.
7711 CALL_DEST is the routine we are calling. */
7713 const char *
7714 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7716 int attr_length = get_attr_length (insn);
7717 int seq_length = dbr_sequence_length ();
7718 rtx xoperands[4];
7720 xoperands[0] = call_dest;
7722 /* Handle the common case where we are sure that the branch will
7723 reach the beginning of the $CODE$ subspace. The within-reach
7724 form of the $$sh_func_adrs call has a length of 28. Because it
7725 has an attribute type of sh_func_adrs, it never has a nonzero
7726 sequence length (i.e., the delay slot is never filled). */
7727 if (!TARGET_LONG_CALLS
7728 && (attr_length == 8
7729 || (attr_length == 28
7730 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7732 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7733 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7735 else
7737 if (TARGET_64BIT)
7739 /* It might seem that one insn could be saved by accessing
7740 the millicode function using the linkage table. However,
7741 this doesn't work in shared libraries and other dynamically
7742 loaded objects. Using a pc-relative sequence also avoids
7743 problems related to the implicit use of the gp register. */
7744 xoperands[1] = gen_rtx_REG (Pmode, 1);
7745 xoperands[2] = xoperands[1];
7746 pa_output_pic_pcrel_sequence (xoperands);
7747 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7749 else if (TARGET_PORTABLE_RUNTIME)
7751 /* Pure portable runtime doesn't allow be/ble; we also don't
7752 have PIC support in the assembler/linker, so this sequence
7753 is needed. */
7755 /* Get the address of our target into %r1. */
7756 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7757 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7759 /* Get our return address into %r31. */
7760 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7761 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7763 /* Jump to our target address in %r1. */
7764 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7766 else if (!flag_pic)
7768 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7769 if (TARGET_PA_20)
7770 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7771 else
7772 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7774 else
7776 xoperands[1] = gen_rtx_REG (Pmode, 31);
7777 xoperands[2] = gen_rtx_REG (Pmode, 1);
7778 pa_output_pic_pcrel_sequence (xoperands);
7780 /* Adjust return address. */
7781 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7783 /* Jump to our target address in %r1. */
7784 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7788 if (seq_length == 0)
7789 output_asm_insn ("nop", xoperands);
7791 return "";
7794 /* Return the attribute length of the call instruction INSN. The SIBCALL
7795 flag indicates whether INSN is a regular call or a sibling call. The
7796 length returned must be longer than the code actually generated by
7797 pa_output_call. Since branch shortening is done before delay branch
7798 sequencing, there is no way to determine whether or not the delay
7799 slot will be filled during branch shortening. Even when the delay
7800 slot is filled, we may have to add a nop if the delay slot contains
7801 a branch that can't reach its target. Thus, we always have to include
7802 the delay slot in the length estimate. This used to be done in
7803 pa_adjust_insn_length but we do it here now as some sequences always
7804 fill the delay slot and we can save four bytes in the estimate for
7805 these sequences. */
7807 int
7808 pa_attr_length_call (rtx_insn *insn, int sibcall)
7810 int local_call;
7811 rtx call, call_dest;
7812 tree call_decl;
7813 int length = 0;
7814 rtx pat = PATTERN (insn);
7815 unsigned long distance = -1;
7817 gcc_assert (CALL_P (insn));
7819 if (INSN_ADDRESSES_SET_P ())
7821 unsigned long total;
7823 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7824 distance = (total + insn_current_reference_address (insn));
7825 if (distance < total)
7826 distance = -1;
7829 gcc_assert (GET_CODE (pat) == PARALLEL);
7831 /* Get the call rtx. */
7832 call = XVECEXP (pat, 0, 0);
7833 if (GET_CODE (call) == SET)
7834 call = SET_SRC (call);
7836 gcc_assert (GET_CODE (call) == CALL);
7838 /* Determine if this is a local call. */
7839 call_dest = XEXP (XEXP (call, 0), 0);
7840 call_decl = SYMBOL_REF_DECL (call_dest);
7841 local_call = call_decl && targetm.binds_local_p (call_decl);
7843 /* pc-relative branch. */
7844 if (!TARGET_LONG_CALLS
7845 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7846 || distance < MAX_PCREL17F_OFFSET))
7847 length += 8;
7849 /* 64-bit plabel sequence. */
7850 else if (TARGET_64BIT && !local_call)
7851 length += sibcall ? 28 : 24;
7853 /* non-pic long absolute branch sequence. */
7854 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7855 length += 12;
7857 /* long pc-relative branch sequence. */
7858 else if (TARGET_LONG_PIC_SDIFF_CALL
7859 || (TARGET_GAS && !TARGET_SOM && local_call))
7861 length += 20;
7863 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7864 length += 8;
7867 /* 32-bit plabel sequence. */
7868 else
7870 length += 32;
7872 if (TARGET_SOM)
7873 length += length_fp_args (insn);
7875 if (flag_pic)
7876 length += 4;
7878 if (!TARGET_PA_20)
7880 if (!sibcall)
7881 length += 8;
7883 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7884 length += 8;
7888 return length;
7891 /* INSN is a function call.
7893 CALL_DEST is the routine we are calling. */
7895 const char *
7896 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7898 int seq_length = dbr_sequence_length ();
7899 tree call_decl = SYMBOL_REF_DECL (call_dest);
7900 int local_call = call_decl && targetm.binds_local_p (call_decl);
7901 rtx xoperands[4];
7903 xoperands[0] = call_dest;
7905 /* Handle the common case where we're sure that the branch will reach
7906 the beginning of the "$CODE$" subspace. This is the beginning of
7907 the current function if we are in a named section. */
7908 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7910 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7911 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7913 else
7915 if (TARGET_64BIT && !local_call)
7917 /* ??? As far as I can tell, the HP linker doesn't support the
7918 long pc-relative sequence described in the 64-bit runtime
7919 architecture. So, we use a slightly longer indirect call. */
7920 xoperands[0] = pa_get_deferred_plabel (call_dest);
7921 xoperands[1] = gen_label_rtx ();
7923 /* If this isn't a sibcall, we put the load of %r27 into the
7924 delay slot. We can't do this in a sibcall as we don't
7925 have a second call-clobbered scratch register available.
7926 We don't need to do anything when generating fast indirect
7927 calls. */
7928 if (seq_length != 0 && !sibcall)
7930 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7931 optimize, 0, NULL);
7933 /* Now delete the delay insn. */
7934 SET_INSN_DELETED (NEXT_INSN (insn));
7935 seq_length = 0;
7938 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7939 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7940 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7942 if (sibcall)
7944 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7945 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7946 output_asm_insn ("bve (%%r1)", xoperands);
7948 else
7950 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7951 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7952 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7953 seq_length = 1;
7956 else
7958 int indirect_call = 0;
7960 /* Emit a long call. There are several different sequences
7961 of increasing length and complexity. In most cases,
7962 they don't allow an instruction in the delay slot. */
7963 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7964 && !TARGET_LONG_PIC_SDIFF_CALL
7965 && !(TARGET_GAS && !TARGET_SOM && local_call)
7966 && !TARGET_64BIT)
7967 indirect_call = 1;
7969 if (seq_length != 0
7970 && !sibcall
7971 && (!TARGET_PA_20
7972 || indirect_call
7973 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7975 /* A non-jump insn in the delay slot. By definition we can
7976 emit this insn before the call (and in fact before argument
7977 relocation). */
7978 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7979 NULL);
7981 /* Now delete the delay insn. */
7982 SET_INSN_DELETED (NEXT_INSN (insn));
7983 seq_length = 0;
7986 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7988 /* This is the best sequence for making long calls in
7989 non-pic code. Unfortunately, GNU ld doesn't provide
7990 the stub needed for external calls, and GAS's support
7991 for this with the SOM linker is buggy. It is safe
7992 to use this for local calls. */
7993 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7994 if (sibcall)
7995 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7996 else
7998 if (TARGET_PA_20)
7999 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
8000 xoperands);
8001 else
8002 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
8004 output_asm_insn ("copy %%r31,%%r2", xoperands);
8005 seq_length = 1;
8008 else
8010 /* The HP assembler and linker can handle relocations for
8011 the difference of two symbols. The HP assembler
8012 recognizes the sequence as a pc-relative call and
8013 the linker provides stubs when needed. */
8015 /* GAS currently can't generate the relocations that
8016 are needed for the SOM linker under HP-UX using this
8017 sequence. The GNU linker doesn't generate the stubs
8018 that are needed for external calls on TARGET_ELF32
8019 with this sequence. For now, we have to use a longer
8020 plabel sequence when using GAS for non local calls. */
8021 if (TARGET_LONG_PIC_SDIFF_CALL
8022 || (TARGET_GAS && !TARGET_SOM && local_call))
8024 xoperands[1] = gen_rtx_REG (Pmode, 1);
8025 xoperands[2] = xoperands[1];
8026 pa_output_pic_pcrel_sequence (xoperands);
8028 else
8030 /* Emit a long plabel-based call sequence. This is
8031 essentially an inline implementation of $$dyncall.
8032 We don't actually try to call $$dyncall as this is
8033 as difficult as calling the function itself. */
8034 xoperands[0] = pa_get_deferred_plabel (call_dest);
8035 xoperands[1] = gen_label_rtx ();
8037 /* Since the call is indirect, FP arguments in registers
8038 need to be copied to the general registers. Then, the
8039 argument relocation stub will copy them back. */
8040 if (TARGET_SOM)
8041 copy_fp_args (insn);
8043 if (flag_pic)
8045 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8046 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8047 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8049 else
8051 output_asm_insn ("addil LR'%0-$global$,%%r27",
8052 xoperands);
8053 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8054 xoperands);
8057 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8058 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8059 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8060 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8062 if (!sibcall && !TARGET_PA_20)
8064 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8065 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8066 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8067 else
8068 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8072 if (TARGET_PA_20)
8074 if (sibcall)
8075 output_asm_insn ("bve (%%r1)", xoperands);
8076 else
8078 if (indirect_call)
8080 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8081 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8082 seq_length = 1;
8084 else
8085 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8088 else
8090 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8091 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8092 xoperands);
8094 if (sibcall)
8096 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8097 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8098 else
8099 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8101 else
8103 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8104 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8105 else
8106 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8108 if (indirect_call)
8109 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8110 else
8111 output_asm_insn ("copy %%r31,%%r2", xoperands);
8112 seq_length = 1;
8119 if (seq_length == 0)
8120 output_asm_insn ("nop", xoperands);
8122 return "";
8125 /* Return the attribute length of the indirect call instruction INSN.
8126 The length must match the code generated by pa_output_indirect_call.
8127 The returned length includes the delay slot. Currently, the delay
8128 slot of an indirect call sequence is not exposed and it is used by
8129 the sequence itself. */
8131 int
8132 pa_attr_length_indirect_call (rtx_insn *insn)
8134 unsigned long distance = -1;
8135 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8137 if (INSN_ADDRESSES_SET_P ())
8139 distance = (total + insn_current_reference_address (insn));
8140 if (distance < total)
8141 distance = -1;
8144 if (TARGET_64BIT)
8145 return 12;
8147 if (TARGET_FAST_INDIRECT_CALLS)
8148 return 8;
8150 if (TARGET_PORTABLE_RUNTIME)
8151 return 16;
8153 /* Inline version of $$dyncall. */
8154 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8155 return 20;
8157 if (!TARGET_LONG_CALLS
8158 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8159 || distance < MAX_PCREL17F_OFFSET))
8160 return 8;
8162 /* Out of reach, can use ble. */
8163 if (!flag_pic)
8164 return 12;
8166 /* Inline version of $$dyncall. */
8167 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8168 return 20;
8170 if (!optimize_size)
8171 return 36;
8173 /* Long PIC pc-relative call. */
8174 return 20;
8177 const char *
8178 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8180 rtx xoperands[4];
8181 int length;
8183 if (TARGET_64BIT)
8185 xoperands[0] = call_dest;
8186 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8187 "bve,l (%%r2),%%r2\n\t"
8188 "ldd 24(%0),%%r27", xoperands);
8189 return "";
8192 /* First the special case for kernels, level 0 systems, etc. */
8193 if (TARGET_FAST_INDIRECT_CALLS)
8195 pa_output_arg_descriptor (insn);
8196 if (TARGET_PA_20)
8197 return "bve,l,n (%%r22),%%r2\n\tnop";
8198 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8201 if (TARGET_PORTABLE_RUNTIME)
8203 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8204 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8205 pa_output_arg_descriptor (insn);
8206 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8209 /* Maybe emit a fast inline version of $$dyncall. */
8210 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8212 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8213 "ldw 2(%%r22),%%r19\n\t"
8214 "ldw -2(%%r22),%%r22", xoperands);
8215 pa_output_arg_descriptor (insn);
8216 if (TARGET_NO_SPACE_REGS)
8218 if (TARGET_PA_20)
8219 return "bve,l,n (%%r22),%%r2\n\tnop";
8220 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8222 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8225 /* Now the normal case -- we can reach $$dyncall directly or
8226 we're sure that we can get there via a long-branch stub.
8228 No need to check target flags as the length uniquely identifies
8229 the remaining cases. */
8230 length = pa_attr_length_indirect_call (insn);
8231 if (length == 8)
8233 pa_output_arg_descriptor (insn);
8235 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8236 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8237 variant of the B,L instruction can't be used on the SOM target. */
8238 if (TARGET_PA_20 && !TARGET_SOM)
8239 return "b,l,n $$dyncall,%%r2\n\tnop";
8240 else
8241 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8244 /* Long millicode call, but we are not generating PIC or portable runtime
8245 code. */
8246 if (length == 12)
8248 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8249 pa_output_arg_descriptor (insn);
8250 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8253 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8254 pc-relative call sequence is five instructions. The inline PA 2.0
8255 version of $$dyncall is also five instructions. The PA 1.X versions
8256 are longer but still an overall win. */
8257 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8259 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8260 "ldw 2(%%r22),%%r19\n\t"
8261 "ldw -2(%%r22),%%r22", xoperands);
8262 if (TARGET_NO_SPACE_REGS)
8264 pa_output_arg_descriptor (insn);
8265 if (TARGET_PA_20)
8266 return "bve,l,n (%%r22),%%r2\n\tnop";
8267 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8269 if (TARGET_PA_20)
8271 pa_output_arg_descriptor (insn);
8272 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8274 output_asm_insn ("bl .+8,%%r2\n\t"
8275 "ldo 16(%%r2),%%r2\n\t"
8276 "ldsid (%%r22),%%r1\n\t"
8277 "mtsp %%r1,%%sr0", xoperands);
8278 pa_output_arg_descriptor (insn);
8279 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8282 /* We need a long PIC call to $$dyncall. */
8283 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8284 xoperands[1] = gen_rtx_REG (Pmode, 2);
8285 xoperands[2] = gen_rtx_REG (Pmode, 1);
8286 pa_output_pic_pcrel_sequence (xoperands);
8287 pa_output_arg_descriptor (insn);
8288 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8291 /* In HP-UX 8.0's shared library scheme, special relocations are needed
8292 for function labels if they might be passed to a function
8293 in a shared library (because shared libraries don't live in code
8294 space), and special magic is needed to construct their address. */
8296 void
8297 pa_encode_label (rtx sym)
8299 const char *str = XSTR (sym, 0);
8300 int len = strlen (str) + 1;
8301 char *newstr, *p;
8303 p = newstr = XALLOCAVEC (char, len + 1);
8304 *p++ = '@';
8305 strcpy (p, str);
8307 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8310 static void
8311 pa_encode_section_info (tree decl, rtx rtl, int first)
8313 int old_referenced = 0;
8315 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8316 old_referenced
8317 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8319 default_encode_section_info (decl, rtl, first);
8321 if (first && TEXT_SPACE_P (decl))
8323 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8324 if (TREE_CODE (decl) == FUNCTION_DECL)
8325 pa_encode_label (XEXP (rtl, 0));
8327 else if (old_referenced)
8328 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8331 /* This is sort of the inverse of pa_encode_section_info. */
8333 static const char *
8334 pa_strip_name_encoding (const char *str)
8336 str += (*str == '@');
8337 str += (*str == '*');
8338 return str;
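/* Editor's sketch (standalone C, compiled out; not part of pa.c): the
   encode/strip pair above is just a prepended '@' (plus GCC's generic '*'
   user-label marker), so stripping is two conditional pointer bumps.  */
#if 0
#include <stdio.h>
#include <string.h>

static const char *
strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}

int
main (void)
{
  char encoded[32] = "@";
  strcat (encoded, "foo");                        /* what pa_encode_label builds */
  printf ("%s\n", strip_name_encoding (encoded)); /* prints "foo" */
  return 0;
}
#endif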
8341 /* Returns 1 if OP is a function label involved in a simple addition
8342 with a constant. Used to keep certain patterns from matching
8343 during instruction combination. */
8344 int
8345 pa_is_function_label_plus_const (rtx op)
8347 /* Strip off any CONST. */
8348 if (GET_CODE (op) == CONST)
8349 op = XEXP (op, 0);
8351 return (GET_CODE (op) == PLUS
8352 && function_label_operand (XEXP (op, 0), VOIDmode)
8353 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8356 /* Output assembly code for a thunk to FUNCTION. */
8358 static void
8359 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8360 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8361 tree function)
8363 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
8364 static unsigned int current_thunk_number;
8365 int val_14 = VAL_14_BITS_P (delta);
8366 unsigned int old_last_address = last_address, nbytes = 0;
8367 char label[17];
8368 rtx xoperands[4];
8370 xoperands[0] = XEXP (DECL_RTL (function), 0);
8371 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8372 xoperands[2] = GEN_INT (delta);
8374 assemble_start_function (thunk_fndecl, fnname);
8375 final_start_function (emit_barrier (), file, 1);
8377 /* Output the thunk. We know that the function is in the same
8378 translation unit (i.e., the same space) as the thunk, and that
8379 thunks are output after their method. Thus, we don't need an
8380 external branch to reach the function. With SOM and GAS,
8381 functions and thunks are effectively in different sections.
8382 Thus, we can always use an IA-relative branch and the linker
8383 will add a long branch stub if necessary.
8385 However, we have to be careful when generating PIC code on the
8386 SOM port to ensure that the sequence does not transfer to an
8387 import stub for the target function as this could clobber the
8388 return value saved at SP-24. This would also apply to the
8389 32-bit linux port if the multi-space model is implemented. */
8390 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8391 && !(flag_pic && TREE_PUBLIC (function))
8392 && (TARGET_GAS || last_address < 262132))
8393 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8394 && ((targetm_common.have_named_sections
8395 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8396 /* The GNU 64-bit linker has rather poor stub management.
8397 So, we use a long branch from thunks that aren't in
8398 the same section as the target function. */
8399 && ((!TARGET_64BIT
8400 && (DECL_SECTION_NAME (thunk_fndecl)
8401 != DECL_SECTION_NAME (function)))
8402 || ((DECL_SECTION_NAME (thunk_fndecl)
8403 == DECL_SECTION_NAME (function))
8404 && last_address < 262132)))
8405 /* In this case, we need to be able to reach the start of
8406 the stub table even though the function is likely closer
8407 and can be jumped to directly. */
8408 || (targetm_common.have_named_sections
8409 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8410 && DECL_SECTION_NAME (function) == NULL
8411 && total_code_bytes < MAX_PCREL17F_OFFSET)
8412 /* Likewise. */
8413 || (!targetm_common.have_named_sections
8414 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8416 if (!val_14)
8417 output_asm_insn ("addil L'%2,%%r26", xoperands);
8419 output_asm_insn ("b %0", xoperands);
8421 if (val_14)
8423 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8424 nbytes += 8;
8426 else
8428 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8429 nbytes += 12;
8432 else if (TARGET_64BIT)
8434 rtx xop[4];
8436 /* We only have one call-clobbered scratch register, so we can't
8437 make use of the delay slot if delta doesn't fit in 14 bits. */
8438 if (!val_14)
8440 output_asm_insn ("addil L'%2,%%r26", xoperands);
8441 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8444 /* Load function address into %r1. */
8445 xop[0] = xoperands[0];
8446 xop[1] = gen_rtx_REG (Pmode, 1);
8447 xop[2] = xop[1];
8448 pa_output_pic_pcrel_sequence (xop);
8450 if (val_14)
8452 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8453 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8454 nbytes += 20;
8456 else
8458 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8459 nbytes += 24;
8462 else if (TARGET_PORTABLE_RUNTIME)
8464 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8465 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8467 if (!val_14)
8468 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8470 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8472 if (val_14)
8474 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8475 nbytes += 16;
8477 else
8479 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8480 nbytes += 20;
8483 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8485 /* The function is accessible from outside this module. The only
8486 way to avoid an import stub between the thunk and function is to
8487 call the function directly with an indirect sequence similar to
8488 that used by $$dyncall. This is possible because $$dyncall acts
8489 as the import stub in an indirect call. */
8490 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8491 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8492 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8493 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8494 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8495 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8496 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8497 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8498 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8500 if (!val_14)
8502 output_asm_insn ("addil L'%2,%%r26", xoperands);
8503 nbytes += 4;
8506 if (TARGET_PA_20)
8508 output_asm_insn ("bve (%%r22)", xoperands);
8509 nbytes += 36;
8511 else if (TARGET_NO_SPACE_REGS)
8513 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8514 nbytes += 36;
8516 else
8518 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8519 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8520 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8521 nbytes += 44;
8524 if (val_14)
8525 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8526 else
8527 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8529 else if (flag_pic)
8531 rtx xop[4];
8533 /* Load function address into %r22. */
8534 xop[0] = xoperands[0];
8535 xop[1] = gen_rtx_REG (Pmode, 1);
8536 xop[2] = gen_rtx_REG (Pmode, 22);
8537 pa_output_pic_pcrel_sequence (xop);
8539 if (!val_14)
8540 output_asm_insn ("addil L'%2,%%r26", xoperands);
8542 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8544 if (val_14)
8546 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8547 nbytes += 20;
8549 else
8551 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8552 nbytes += 24;
8555 else
8557 if (!val_14)
8558 output_asm_insn ("addil L'%2,%%r26", xoperands);
8560 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8561 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8563 if (val_14)
8565 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8566 nbytes += 12;
8568 else
8570 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8571 nbytes += 16;
8575 final_end_function ();
8577 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8579 switch_to_section (data_section);
8580 output_asm_insn (".align 4", xoperands);
8581 ASM_OUTPUT_LABEL (file, label);
8582 output_asm_insn (".word P'%0", xoperands);
8585 current_thunk_number++;
8586 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8587 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8588 last_address += nbytes;
8589 if (old_last_address > last_address)
8590 last_address = UINT_MAX;
8591 update_total_code_bytes (nbytes);
8592 assemble_end_function (thunk_fndecl, fnname);
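/* Editor's sketch (standalone C, compiled out; not part of pa.c): the
   nbytes computation above is the standard round-up-to-alignment idiom
   (n + a - 1) & ~(a - 1) with a = FUNCTION_BOUNDARY / BITS_PER_UNIT,
   valid whenever the alignment is a power of two.  */
#if 0
#include <stdio.h>

static unsigned int
round_up (unsigned int n, unsigned int align)   /* align: power of two */
{
  return (n + align - 1) & ~(align - 1);
}

int
main (void)
{
  printf ("%u %u\n", round_up (20, 8), round_up (24, 8));  /* prints 24 24 */
  return 0;
}
#endif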
8595 /* Only direct calls to static functions are allowed to be sibling (tail)
8596 call optimized.
8598 This restriction is necessary because some linker-generated stubs will
8599 store return pointers into rp' in some cases which might clobber a
8600 live value already in rp'.
8602 In a sibcall the current function and the target function share stack
8603 space. Thus if the path to the current function and the path to the
8604 target function save a value in rp', they save the value into the
8605 same stack slot, which has undesirable consequences.
8607 Because of the deferred binding nature of shared libraries any function
8608 with external scope could be in a different load module and thus require
8609 rp' to be saved when calling that function. So sibcall optimizations
8610 can only be safe for static functions.
8612 Note that GCC never needs return value relocations, so we don't have to
8613 worry about static calls with return value relocations (which require
8614 saving rp').
8616 It is safe to perform a sibcall optimization when the target function
8617 will never return. */
8618 static bool
8619 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8621 /* Sibcalls are not ok because the arg pointer register is not a fixed
8622 register. This prevents the sibcall optimization from occurring. In
8623 addition, there are problems with stub placement using GNU ld. This
8624 is because a normal sibcall branch uses a 17-bit relocation while
8625 a regular call branch uses a 22-bit relocation. As a result, more
8626 care needs to be taken in the placement of long-branch stubs. */
8627 if (TARGET_64BIT)
8628 return false;
8630 if (TARGET_PORTABLE_RUNTIME)
8631 return false;
8633 /* Sibcalls are only ok within a translation unit. */
8634 return decl && targetm.binds_local_p (decl);
8637 /* ??? Addition is not commutative on the PA due to the weird implicit
8638 space register selection rules for memory addresses. Therefore, we
8639 don't consider a + b == b + a, as this might be inside a MEM. */
8640 static bool
8641 pa_commutative_p (const_rtx x, int outer_code)
8643 return (COMMUTATIVE_P (x)
8644 && (TARGET_NO_SPACE_REGS
8645 || (outer_code != UNKNOWN && outer_code != MEM)
8646 || GET_CODE (x) != PLUS));
8649 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8650 use in fmpyadd instructions. */
8651 int
8652 pa_fmpyaddoperands (rtx *operands)
8654 machine_mode mode = GET_MODE (operands[0]);
8656 /* Must be a floating point mode. */
8657 if (mode != SFmode && mode != DFmode)
8658 return 0;
8660 /* All modes must be the same. */
8661 if (! (mode == GET_MODE (operands[1])
8662 && mode == GET_MODE (operands[2])
8663 && mode == GET_MODE (operands[3])
8664 && mode == GET_MODE (operands[4])
8665 && mode == GET_MODE (operands[5])))
8666 return 0;
8668 /* All operands must be registers. */
8669 if (! (GET_CODE (operands[1]) == REG
8670 && GET_CODE (operands[2]) == REG
8671 && GET_CODE (operands[3]) == REG
8672 && GET_CODE (operands[4]) == REG
8673 && GET_CODE (operands[5]) == REG))
8674 return 0;
8676 /* Only 2 real operands to the addition. One of the input operands must
8677 be the same as the output operand. */
8678 if (! rtx_equal_p (operands[3], operands[4])
8679 && ! rtx_equal_p (operands[3], operands[5]))
8680 return 0;
8682 /* Inout operand of add cannot conflict with any operands from multiply. */
8683 if (rtx_equal_p (operands[3], operands[0])
8684 || rtx_equal_p (operands[3], operands[1])
8685 || rtx_equal_p (operands[3], operands[2]))
8686 return 0;
8688 /* The multiply result cannot feed into the addition operands. */
8689 if (rtx_equal_p (operands[4], operands[0])
8690 || rtx_equal_p (operands[5], operands[0]))
8691 return 0;
8693 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8694 if (mode == SFmode
8695 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8696 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8697 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8698 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8699 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8700 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8701 return 0;
8703 /* Passed. Operands are suitable for fmpyadd. */
8704 return 1;
8707 #if !defined(USE_COLLECT2)
8708 static void
8709 pa_asm_out_constructor (rtx symbol, int priority)
8711 if (!function_label_operand (symbol, VOIDmode))
8712 pa_encode_label (symbol);
8714 #ifdef CTORS_SECTION_ASM_OP
8715 default_ctor_section_asm_out_constructor (symbol, priority);
8716 #else
8717 # ifdef TARGET_ASM_NAMED_SECTION
8718 default_named_section_asm_out_constructor (symbol, priority);
8719 # else
8720 default_stabs_asm_out_constructor (symbol, priority);
8721 # endif
8722 #endif
8725 static void
8726 pa_asm_out_destructor (rtx symbol, int priority)
8728 if (!function_label_operand (symbol, VOIDmode))
8729 pa_encode_label (symbol);
8731 #ifdef DTORS_SECTION_ASM_OP
8732 default_dtor_section_asm_out_destructor (symbol, priority);
8733 #else
8734 # ifdef TARGET_ASM_NAMED_SECTION
8735 default_named_section_asm_out_destructor (symbol, priority);
8736 # else
8737 default_stabs_asm_out_destructor (symbol, priority);
8738 # endif
8739 #endif
8741 #endif
8743 /* This function places uninitialized global data in the bss section.
8744 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8745 function on the SOM port to prevent uninitialized global data from
8746 being placed in the data section. */
8748 void
8749 pa_asm_output_aligned_bss (FILE *stream,
8750 const char *name,
8751 unsigned HOST_WIDE_INT size,
8752 unsigned int align)
8754 switch_to_section (bss_section);
8755 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8757 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8758 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8759 #endif
8761 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8762 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8763 #endif
8765 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8766 ASM_OUTPUT_LABEL (stream, name);
8767 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8770 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8771 that doesn't allow the alignment of global common storage to be directly
8772 specified. The SOM linker aligns common storage based on the rounded
8773 value of the NUM_BYTES parameter in the .comm directive. It's not
8774 possible to use the .align directive as it doesn't affect the alignment
8775 of the label associated with a .comm directive. */
8777 void
8778 pa_asm_output_aligned_common (FILE *stream,
8779 const char *name,
8780 unsigned HOST_WIDE_INT size,
8781 unsigned int align)
8783 unsigned int max_common_align;
8785 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8786 if (align > max_common_align)
8788 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8789 "for global common data. Using %u",
8790 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8791 align = max_common_align;
8794 switch_to_section (bss_section);
8796 assemble_name (stream, name);
8797 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8798 MAX (size, align / BITS_PER_UNIT));
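/* Editor's sketch (standalone C, compiled out; not part of pa.c): since
   the SOM linker aligns common storage by the rounded NUM_BYTES value, as
   the comment above explains, emitting MAX (size, align / BITS_PER_UNIT)
   bytes is what buys the requested alignment for small objects.  */
#if 0
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int
main (void)
{
  unsigned long size = 2;    /* a 2-byte object...             */
  unsigned int align = 64;   /* ...that needs 8-byte alignment */
  printf ("\t.comm %lu\n", MAX (size, (unsigned long) align / 8)); /* .comm 8 */
  return 0;
}
#endif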
8801 /* We can't use .comm for local common storage as the SOM linker effectively
8802 treats the symbol as universal and uses the same storage for local symbols
8803 with the same name in different object files. The .block directive
8804 reserves an uninitialized block of storage. However, it's not common
8805 storage. Fortunately, GCC never requests common storage with the same
8806 name in any given translation unit. */
8808 void
8809 pa_asm_output_aligned_local (FILE *stream,
8810 const char *name,
8811 unsigned HOST_WIDE_INT size,
8812 unsigned int align)
8814 switch_to_section (bss_section);
8815 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8817 #ifdef LOCAL_ASM_OP
8818 fprintf (stream, "%s", LOCAL_ASM_OP);
8819 assemble_name (stream, name);
8820 fprintf (stream, "\n");
8821 #endif
8823 ASM_OUTPUT_LABEL (stream, name);
8824 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8827 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8828 use in fmpysub instructions. */
8829 int
8830 pa_fmpysuboperands (rtx *operands)
8832 machine_mode mode = GET_MODE (operands[0]);
8834 /* Must be a floating point mode. */
8835 if (mode != SFmode && mode != DFmode)
8836 return 0;
8838 /* All modes must be the same. */
8839 if (! (mode == GET_MODE (operands[1])
8840 && mode == GET_MODE (operands[2])
8841 && mode == GET_MODE (operands[3])
8842 && mode == GET_MODE (operands[4])
8843 && mode == GET_MODE (operands[5])))
8844 return 0;
8846 /* All operands must be registers. */
8847 if (! (GET_CODE (operands[1]) == REG
8848 && GET_CODE (operands[2]) == REG
8849 && GET_CODE (operands[3]) == REG
8850 && GET_CODE (operands[4]) == REG
8851 && GET_CODE (operands[5]) == REG))
8852 return 0;
8854 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8855 operation, so operands[4] must be the same as operands[3]. */
8856 if (! rtx_equal_p (operands[3], operands[4]))
8857 return 0;
8859 /* The multiply result cannot feed into the subtraction. */
8860 if (rtx_equal_p (operands[5], operands[0]))
8861 return 0;
8863 /* Inout operand of sub cannot conflict with any operands from multiply. */
8864 if (rtx_equal_p (operands[3], operands[0])
8865 || rtx_equal_p (operands[3], operands[1])
8866 || rtx_equal_p (operands[3], operands[2]))
8867 return 0;
8869 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8870 if (mode == SFmode
8871 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8872 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8873 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8874 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8875 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8876 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8877 return 0;
8879 /* Passed. Operands are suitable for fmpysub. */
8880 return 1;
8883 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8884 constants for a MULT embedded inside a memory address. */
8885 int
8886 pa_mem_shadd_constant_p (int val)
8888 if (val == 2 || val == 4 || val == 8)
8889 return 1;
8890 else
8891 return 0;
8894 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8895 constants for shadd instructions. */
8896 int
8897 pa_shadd_constant_p (int val)
8899 if (val == 1 || val == 2 || val == 3)
8900 return 1;
8901 else
8902 return 0;
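/* Editor's sketch (standalone C, compiled out; not part of pa.c): the two
   predicates above look like two views of the same fact; a MULT scale of
   2, 4 or 8 inside an address corresponds to a shadd shift count of 1, 2
   or 3, related by scale = 1 << count.  */
#if 0
#include <stdio.h>

int
main (void)
{
  for (int shift = 1; shift <= 3; shift++)
    printf ("shadd count %d <=> address scale %d\n", shift, 1 << shift);
  return 0;
}
#endif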
8905 /* Return TRUE if INSN branches forward. */
8907 static bool
8908 forward_branch_p (rtx_insn *insn)
8910 rtx lab = JUMP_LABEL (insn);
8912 /* The INSN must have a jump label. */
8913 gcc_assert (lab != NULL_RTX);
8915 if (INSN_ADDRESSES_SET_P ())
8916 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8918 while (insn)
8920 if (insn == lab)
8921 return true;
8922 else
8923 insn = NEXT_INSN (insn);
8926 return false;
8929 /* Output an unconditional move and branch insn. */
8931 const char *
8932 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8934 int length = get_attr_length (insn);
8936 /* These are the cases in which we win. */
8937 if (length == 4)
8938 return "mov%I1b,tr %1,%0,%2";
8940 /* None of the following cases win, but they don't lose either. */
8941 if (length == 8)
8943 if (dbr_sequence_length () == 0)
8945 /* Nothing in the delay slot, fake it by putting the combined
8946 insn (the copy or add) in the delay slot of a bl. */
8947 if (GET_CODE (operands[1]) == CONST_INT)
8948 return "b %2\n\tldi %1,%0";
8949 else
8950 return "b %2\n\tcopy %1,%0";
8952 else
8954 /* Something in the delay slot, but we've got a long branch. */
8955 if (GET_CODE (operands[1]) == CONST_INT)
8956 return "ldi %1,%0\n\tb %2";
8957 else
8958 return "copy %1,%0\n\tb %2";
8962 if (GET_CODE (operands[1]) == CONST_INT)
8963 output_asm_insn ("ldi %1,%0", operands);
8964 else
8965 output_asm_insn ("copy %1,%0", operands);
8966 return pa_output_lbranch (operands[2], insn, 1);
8969 /* Output an unconditional add and branch insn. */
8971 const char *
8972 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8974 int length = get_attr_length (insn);
8976 /* To make life easy we want operand0 to be the shared input/output
8977 operand and operand1 to be the readonly operand. */
8978 if (operands[0] == operands[1])
8979 operands[1] = operands[2];
8981 /* These are the cases in which we win. */
8982 if (length == 4)
8983 return "add%I1b,tr %1,%0,%3";
8985 /* None of the following cases win, but they don't lose either. */
8986 if (length == 8)
8988 if (dbr_sequence_length () == 0)
8989 /* Nothing in the delay slot, fake it by putting the combined
8990 insn (the copy or add) in the delay slot of a bl. */
8991 return "b %3\n\tadd%I1 %1,%0,%0";
8992 else
8993 /* Something in the delay slot, but we've got a long branch. */
8994 return "add%I1 %1,%0,%0\n\tb %3";
8997 output_asm_insn ("add%I1 %1,%0,%0", operands);
8998 return pa_output_lbranch (operands[3], insn, 1);
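/* Editor's sketch (standalone C, compiled out; not part of pa.c): both
   parallel move/add routines above dispatch purely on get_attr_length;
   4 bytes means the combined branching form fits, 8 bytes means a two-insn
   pair, and anything longer goes through pa_output_lbranch.  A toy
   dispatcher for the movb case: */
#if 0
#include <stdio.h>

static const char *
pick_movb_template (int length)
{
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";       /* combined move-and-branch      */
  if (length == 8)
    return "b %2\n\tcopy %1,%0";        /* copy in the branch delay slot */
  return "<pa_output_lbranch sequence>";
}

int
main (void)
{
  printf ("%s\n", pick_movb_template (4));
  return 0;
}
#endif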
9001 /* We use this hook to perform a PA-specific optimization which is difficult
9002 to do in earlier passes. */
9004 static void
9005 pa_reorg (void)
9007 remove_useless_addtr_insns (1);
9009 if (pa_cpu < PROCESSOR_8000)
9010 pa_combine_instructions ();
9013 /* The PA has a number of odd instructions which can perform multiple
9014 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9015 it may be profitable to combine two instructions into one instruction
9016 with two outputs. It's not profitable on PA2.0 machines because the
9017 two outputs would take two slots in the reorder buffers.
9019 This routine finds instructions which can be combined and combines
9020 them. We only support some of the potential combinations, and we
9021 only try common ways to find suitable instructions.
9023 * addb can add two registers or a register and a small integer
9024 and jump to a nearby (+-8k) location. Normally the jump to the
9025 nearby location is conditional on the result of the add, but by
9026 using the "true" condition we can make the jump unconditional.
9027 Thus addb can perform two independent operations in one insn.
9029 * movb is similar to addb in that it can perform a reg->reg
9030 or small immediate->reg copy and jump to a nearby (+-8k) location.
9032 * fmpyadd and fmpysub can perform a FP multiply and either an
9033 FP add or FP sub if the operands of the multiply and add/sub are
9034 independent (there are other minor restrictions). Note both
9035 the fmpy and fadd/fsub can in theory move to better spots according
9036 to data dependencies, but for now we require the fmpy stay at a
9037 fixed location.
9039 * Many of the memory operations can perform pre & post updates
9040 of index registers. GCC's pre/post increment/decrement addressing
9041 is far too simple to take advantage of all the possibilities. This
9042 pass may not be suitable since those insns may not be independent.
9044 * comclr can compare two ints or an int and a register, nullify
9045 the following instruction and zero some other register. This
9046 is more difficult to use as it's harder to find an insn which
9047 will generate a comclr than finding something like an unconditional
9048 branch. (conditional moves & long branches create comclr insns).
9050 * Most arithmetic operations can conditionally skip the next
9051 instruction. They can be viewed as "perform this operation
9052 and conditionally jump to this nearby location" (where nearby
9053 is a few insns away). These are difficult to use due to the
9054 branch length restrictions. */
9056 static void
9057 pa_combine_instructions (void)
9059 rtx_insn *anchor;
9061 /* This can get expensive since the basic algorithm is on the
9062 order of O(n^2) (or worse). Only do it for -O2 or higher
9063 levels of optimization. */
9064 if (optimize < 2)
9065 return;
9067 /* Walk down the list of insns looking for "anchor" insns which
9068 may be combined with "floating" insns. As the name implies,
9069 "anchor" instructions don't move, while "floating" insns may
9070 move around. */
9071 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9072 rtx_insn *new_rtx = make_insn_raw (par);
9074 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9076 enum attr_pa_combine_type anchor_attr;
9077 enum attr_pa_combine_type floater_attr;
9079 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9080 Also ignore any special USE insns. */
9081 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9082 || GET_CODE (PATTERN (anchor)) == USE
9083 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9084 continue;
9086 anchor_attr = get_attr_pa_combine_type (anchor);
9087 /* See if anchor is an insn suitable for combination. */
9088 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9089 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9090 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9091 && ! forward_branch_p (anchor)))
9093 rtx_insn *floater;
9095 for (floater = PREV_INSN (anchor);
9096 floater;
9097 floater = PREV_INSN (floater))
9099 if (NOTE_P (floater)
9100 || (NONJUMP_INSN_P (floater)
9101 && (GET_CODE (PATTERN (floater)) == USE
9102 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9103 continue;
9105 /* Anything except a regular INSN will stop our search. */
9106 if (! NONJUMP_INSN_P (floater))
9108 floater = NULL;
9109 break;
9112 /* See if FLOATER is suitable for combination with the
9113 anchor. */
9114 floater_attr = get_attr_pa_combine_type (floater);
9115 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9116 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9117 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9118 && floater_attr == PA_COMBINE_TYPE_FMPY))
9120 /* If ANCHOR and FLOATER can be combined, then we're
9121 done with this pass. */
9122 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9123 SET_DEST (PATTERN (floater)),
9124 XEXP (SET_SRC (PATTERN (floater)), 0),
9125 XEXP (SET_SRC (PATTERN (floater)), 1)))
9126 break;
9129 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9130 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9132 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9134 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9135 SET_DEST (PATTERN (floater)),
9136 XEXP (SET_SRC (PATTERN (floater)), 0),
9137 XEXP (SET_SRC (PATTERN (floater)), 1)))
9138 break;
9140 else
9142 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9143 SET_DEST (PATTERN (floater)),
9144 SET_SRC (PATTERN (floater)),
9145 SET_SRC (PATTERN (floater))))
9146 break;
9151 /* If we didn't find anything on the backwards scan try forwards. */
9152 if (!floater
9153 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9154 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9156 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9158 if (NOTE_P (floater)
9159 || (NONJUMP_INSN_P (floater)
9160 && (GET_CODE (PATTERN (floater)) == USE
9161 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9163 continue;
9165 /* Anything except a regular INSN will stop our search. */
9166 if (! NONJUMP_INSN_P (floater))
9168 floater = NULL;
9169 break;
9172 /* See if FLOATER is suitable for combination with the
9173 anchor. */
9174 floater_attr = get_attr_pa_combine_type (floater);
9175 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9176 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9177 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9178 && floater_attr == PA_COMBINE_TYPE_FMPY))
9180 /* If ANCHOR and FLOATER can be combined, then we're
9181 done with this pass. */
9182 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9183 SET_DEST (PATTERN (floater)),
9184 XEXP (SET_SRC (PATTERN (floater)),
9185 0),
9186 XEXP (SET_SRC (PATTERN (floater)),
9187 1)))
9188 break;
9193 /* FLOATER will be nonzero if we found a suitable floating
9194 insn for combination with ANCHOR. */
9195 if (floater
9196 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9197 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9199 /* Emit the new instruction and delete the old anchor. */
9200 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9201 copy_rtx (PATTERN (floater)));
9202 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9203 emit_insn_before (temp, anchor);
9205 SET_INSN_DELETED (anchor);
9207 /* Emit a special USE insn for FLOATER, then delete
9208 the floating insn. */
9209 temp = copy_rtx (PATTERN (floater));
9210 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9211 delete_insn (floater);
9213 continue;
9215 else if (floater
9216 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9218 /* Emit the new_jump instruction and delete the old anchor. */
9219 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9220 copy_rtx (PATTERN (floater)));
9221 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9222 temp = emit_jump_insn_before (temp, anchor);
9224 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9225 SET_INSN_DELETED (anchor);
9227 /* Emit a special USE insn for FLOATER, then delete
9228 the floating insn. */
9229 temp = copy_rtx (PATTERN (floater));
9230 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9231 delete_insn (floater);
9232 continue;
9238 static int
9239 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9240 int reversed, rtx dest,
9241 rtx src1, rtx src2)
9243 int insn_code_number;
9244 rtx_insn *start, *end;
9246 /* Create a PARALLEL with the patterns of ANCHOR and
9247 FLOATER, try to recognize it, then test constraints
9248 for the resulting pattern.
9250 If the pattern doesn't match or the constraints
9251 aren't met keep searching for a suitable floater
9252 insn. */
9253 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9254 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9255 INSN_CODE (new_rtx) = -1;
9256 insn_code_number = recog_memoized (new_rtx);
9257 basic_block bb = BLOCK_FOR_INSN (anchor);
9258 if (insn_code_number < 0
9259 || (extract_insn (new_rtx),
9260 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9261 return 0;
9263 if (reversed)
9265 start = anchor;
9266 end = floater;
9268 else
9270 start = floater;
9271 end = anchor;
9274 /* There are up to three operands to consider: one
9275 output and two inputs.
9277 The output must not be used between FLOATER & ANCHOR
9278 exclusive. The inputs must not be set between
9279 FLOATER and ANCHOR exclusive. */
9281 if (reg_used_between_p (dest, start, end))
9282 return 0;
9284 if (reg_set_between_p (src1, start, end))
9285 return 0;
9287 if (reg_set_between_p (src2, start, end))
9288 return 0;
9290 /* If we get here, then everything is good. */
9291 return 1;
9294 /* Return nonzero if references for INSN are delayed.
9296 Millicode insns are actually function calls with some special
9297 constraints on arguments and register usage.
9299 Millicode calls always expect their arguments in the integer argument
9300 registers, and always return their result in %r29 (ret1). They
9301 are expected to clobber their arguments, %r1, %r29, and the return
9302 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9304 This function tells reorg that the references to arguments and
9305 millicode calls do not appear to happen until after the millicode call.
9306 This allows reorg to put insns which set the argument registers into the
9307 delay slot of the millicode call -- thus they act more like traditional
9308 CALL_INSNs.
9310 Note we cannot consider side effects of the insn to be delayed because
9311 the branch and link insn will clobber the return pointer. If we happened
9312 to use the return pointer in the delay slot of the call, then we lose.
9314 get_attr_type will try to recognize the given insn, so make sure to
9315 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9316 in particular. */
9317 int
9318 pa_insn_refs_are_delayed (rtx_insn *insn)
9320 return ((NONJUMP_INSN_P (insn)
9321 && GET_CODE (PATTERN (insn)) != SEQUENCE
9322 && GET_CODE (PATTERN (insn)) != USE
9323 && GET_CODE (PATTERN (insn)) != CLOBBER
9324 && get_attr_type (insn) == TYPE_MILLI));
9327 /* Promote the return value, but not the arguments. */
9329 static machine_mode
9330 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9331 machine_mode mode,
9332 int *punsignedp ATTRIBUTE_UNUSED,
9333 const_tree fntype ATTRIBUTE_UNUSED,
9334 int for_return)
9336 if (for_return == 0)
9337 return mode;
9338 return promote_mode (type, mode, punsignedp);
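/* Editor's sketch (standalone C, compiled out; not part of pa.c): the hook
   above promotes only return values (for_return != 0), so a callee hands a
   'short' result back widened to a full word while a 'short' argument is
   left alone.  A plain C picture of that widening: */
#if 0
#include <stdint.h>
#include <stdio.h>

static int32_t
widen_short_result (int16_t v)
{
  return (int32_t) v;                   /* sign-extended to word size */
}

int
main (void)
{
  printf ("0x%08x\n", (uint32_t) widen_short_result (-5));  /* 0xfffffffb */
  return 0;
}
#endif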
9341 /* On the HP-PA the value is found in register(s) 28(-29), unless
9342 the mode is SF or DF. Then the value is returned in fr4 (32).
9344 This must perform the same promotions as PROMOTE_MODE, else promoting
9345 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9347 Small structures must be returned in a PARALLEL on PA64 in order
9348 to match the HP Compiler ABI. */
9350 static rtx
9351 pa_function_value (const_tree valtype,
9352 const_tree func ATTRIBUTE_UNUSED,
9353 bool outgoing ATTRIBUTE_UNUSED)
9355 machine_mode valmode;
9357 if (AGGREGATE_TYPE_P (valtype)
9358 || TREE_CODE (valtype) == COMPLEX_TYPE
9359 || TREE_CODE (valtype) == VECTOR_TYPE)
9361 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9363 /* Handle aggregates that fit exactly in a word or double word. */
9364 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9365 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9367 if (TARGET_64BIT)
9369 /* Aggregates with a size less than or equal to 128 bits are
9370 returned in GR 28(-29). They are left justified. The pad
9371 bits are undefined. Larger aggregates are returned in
9372 memory. */
9373 rtx loc[2];
9374 int i, offset = 0;
9375 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9377 for (i = 0; i < ub; i++)
9379 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9380 gen_rtx_REG (DImode, 28 + i),
9381 GEN_INT (offset));
9382 offset += 8;
9385 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9387 else if (valsize > UNITS_PER_WORD)
9389 /* Aggregates 5 to 8 bytes in size are returned in general
9390 registers r28-r29 in the same manner as other
9391 non-floating-point objects. The data is right-justified and
9392 zero-extended to 64 bits. This is opposite to the normal
9393 justification used on big endian targets and requires
9394 special treatment. */
9395 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9396 gen_rtx_REG (DImode, 28), const0_rtx);
9397 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9401 if ((INTEGRAL_TYPE_P (valtype)
9402 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9403 || POINTER_TYPE_P (valtype))
9404 valmode = word_mode;
9405 else
9406 valmode = TYPE_MODE (valtype);
9408 if (TREE_CODE (valtype) == REAL_TYPE
9409 && !AGGREGATE_TYPE_P (valtype)
9410 && TYPE_MODE (valtype) != TFmode
9411 && !TARGET_SOFT_FLOAT)
9412 return gen_rtx_REG (valmode, 32);
9414 return gen_rtx_REG (valmode, 28);
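/* Editor's sketch (standalone C, compiled out; not part of pa.c): on PA64
   the PARALLEL loop above emits one (register, byte offset) pair per
   64-bit slot, so a 9..16 byte aggregate lands in GR 28 at offset 0 and
   GR 29 at offset 8.  A toy model of the slot assignment: */
#if 0
#include <stdio.h>

#define UNITS_PER_WORD 8

int
main (void)
{
  int valsize = 12;   /* e.g. struct { long a; int b; } on PA64 */
  int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
  for (int i = 0, offset = 0; i < ub; i++, offset += 8)
    printf ("GR %d at byte offset %d\n", 28 + i, offset);
  return 0;
}
#endif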
9417 /* Implement the TARGET_LIBCALL_VALUE hook. */
9419 static rtx
9420 pa_libcall_value (machine_mode mode,
9421 const_rtx fun ATTRIBUTE_UNUSED)
9423 if (! TARGET_SOFT_FLOAT
9424 && (mode == SFmode || mode == DFmode))
9425 return gen_rtx_REG (mode, 32);
9426 else
9427 return gen_rtx_REG (mode, 28);
9430 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9432 static bool
9433 pa_function_value_regno_p (const unsigned int regno)
9435 if (regno == 28
9436 || (! TARGET_SOFT_FLOAT && regno == 32))
9437 return true;
9439 return false;
9442 /* Update the data in CUM to advance over an argument
9443 of mode MODE and data type TYPE.
9444 (TYPE is null for libcalls where that information may not be available.) */
9446 static void
9447 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9448 const_tree type, bool named ATTRIBUTE_UNUSED)
9450 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9451 int arg_size = pa_function_arg_size (mode, type);
9453 cum->nargs_prototype--;
9454 cum->words += (arg_size
9455 + ((cum->words & 01)
9456 && type != NULL_TREE
9457 && arg_size > 1));
9460 /* Return the location of a parameter that is passed in a register or NULL
9461 if the parameter has any component that is passed in memory.
9463 This is new code and will be pushed into the net sources after
9464 further testing.
9466 ??? We might want to restructure this so that it looks more like other
9467 ports. */
static rtx
pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
                 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = pa_function_arg_size (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
        return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
        alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
        return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

         Remember, gprs grow towards smaller register numbers while
         fprs grow to higher register numbers.  Also remember that
         although FP regs are 32-bit addressable, we pretend that
         the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
         treatment.  */
      if (arg_size > 1
          || mode == BLKmode
          || (type && (AGGREGATE_TYPE_P (type)
                       || TREE_CODE (type) == COMPLEX_TYPE
                       || TREE_CODE (type) == VECTOR_TYPE)))
        {
          /* Double-extended precision (80-bit), quad-precision (128-bit)
             and aggregates including complex numbers are aligned on
             128-bit boundaries.  The first eight 64-bit argument slots
             are associated one-to-one, with general registers r26
             through r19, and also with floating-point registers fr4
             through fr11.  Arguments larger than one word are always
             passed in general registers.

             Using a PARALLEL with a word mode register results in left
             justified data on a big-endian target.  */

          rtx loc[8];
          int i, offset = 0, ub = arg_size;

          /* Align the base register.  */
          gpr_reg_base -= alignment;

          ub = MIN (ub, max_arg_words - cum->words - alignment);
          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, gpr_reg_base),
                                          GEN_INT (offset));
              gpr_reg_base -= 1;
              offset += 8;
            }

          return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
        }
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
         which registers we must use.  */
      if (arg_size > 1)
        {
          if (cum->words)
            {
              gpr_reg_base = 23;
              fpr_reg_base = 38;
            }
          else
            {
              gpr_reg_base = 25;
              fpr_reg_base = 34;
            }

          /* Structures 5 to 8 bytes in size are passed in the general
             registers in the same manner as other non floating-point
             objects.  The data is right-justified and zero-extended
             to 64 bits.  This is opposite to the normal justification
             used on big endian targets and requires special treatment.
             We now define BLOCK_REG_PADDING to pad these objects.
             Aggregates, complex and vector types are passed in the same
             manner as structures.  */
          if (mode == BLKmode
              || (type && (AGGREGATE_TYPE_P (type)
                           || TREE_CODE (type) == COMPLEX_TYPE
                           || TREE_CODE (type) == VECTOR_TYPE)))
            {
              rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (DImode, gpr_reg_base),
                                           const0_rtx);
              return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
            }
        }
      else
        {
          /* We have a single word (32 bits).  A simple computation
             will get us the register #s we need.  */
          gpr_reg_base = 26 - cum->words;
          fpr_reg_base = 32 + 2 * cum->words;
        }
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
          is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
          pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
          registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
          sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
         calls with the 32 bit ABI and the HP assembler since there is no
         way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
          && !TARGET_GAS
          && !cum->incoming
          && cum->indirect
          && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
        = gen_rtx_PARALLEL
            (mode,
             gen_rtvec (2,
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, fpr_reg_base),
                                           const0_rtx),
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, gpr_reg_base),
                                           const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* Structure with single SFmode field belongs in GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }

  return retval;
}
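
/* Illustrative examples of the register selection above (editorial,
   derived from the code): a single-word argument goes in GPR
   26 - cum->words, so the first argument word lands in r26 and the
   fourth in r23.  An unprototyped outgoing scalar float is built as a
   two-element PARALLEL so that the value is passed in both its GPR
   slot and the corresponding FPR.  */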
/* Arguments larger than one word are double word aligned.  */

static unsigned int
pa_function_arg_boundary (machine_mode mode, const_tree type)
{
  bool singleword = (type
                     ? (integer_zerop (TYPE_SIZE (type))
                        || !TREE_CONSTANT (TYPE_SIZE (type))
                        || int_size_in_bytes (type) <= UNITS_PER_WORD)
                     : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);

  return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
}
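
/* Illustrative example: in the 32-bit runtime a DFmode argument is
   larger than UNITS_PER_WORD and so gets MAX_PARM_BOUNDARY (double
   word) alignment, while an int argument gets PARM_BOUNDARY.  */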
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (pa_function_arg_size (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + pa_function_arg_size (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
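
/* Illustrative example of the split case above: in the 64-bit runtime,
   with cum->words == 6 and a four-word argument, the first two words
   fit in registers and the rest goes on the stack, so the function
   returns (8 - 6 - 0) * UNITS_PER_WORD == 16 bytes.  */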
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  if (flag_tm)
    som_tm_clone_table_section
      = get_unnamed_section (0, output_section_asm_op,
                             "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols, which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION.  */

static section *
pa_som_tm_clone_table_section (void)
{
  return som_tm_clone_table_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc
   cannot be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
/* Implement pa_reloc_rw_mask.  */

static int
pa_reloc_rw_mask (void)
{
  /* We force (const (plus (symbol) (const_int))) to memory when the
     const_int doesn't fit in a 14-bit integer.  The SOM linker can't
     handle this construct in read-only memory and we want to avoid
     this for ELF.  So, we always force an RTX needing relocation to
     the data section.  */
  return 3;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
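
/* Illustrative example: an 8-byte structure is returned in registers
   in both runtimes, a 12-byte structure is returned in memory in the
   32-bit runtime but in registers in the 64-bit runtime, and any
   variable-sized or empty object is returned in memory.  */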
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Gc'd vector of extern_symbol pointers.  */
static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  gcc_assert (file == asm_out_file);
  extern_symbol p = {decl, name};
  vec_safe_push (extern_symbols, p);
}
#endif
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_file_end (void)
{
#ifdef ASM_OUTPUT_EXTERNAL_REAL
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();
#endif

  output_deferred_plabels ();

#ifdef ASM_OUTPUT_EXTERNAL_REAL
  for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  vec_free (extern_symbols);
#endif

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
pa_can_change_mode_class (machine_mode from, machine_mode to,
                          reg_class_t rclass)
{
  if (from == to)
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return true;

  /* Reject changes to/from modes with zero size.  */
  if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return false;

  /* There is no way to load QImode or HImode values directly from memory
     to a FP register.  SImode loads to the FP registers are not zero
     extended.  On the 64-bit target, this conflicts with the definition
     of LOAD_EXTEND_OP.  Thus, we reject all mode changes in the FP registers
     except for DImode to SImode on the 64-bit target.  It is handled by
     register renaming in pa_print_operand.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return TARGET_64BIT && from == DImode && to == SImode;

  /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return false;

  return true;
}
/* Implement TARGET_MODES_TIEABLE_P.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on TARGET_HARD_REGNO_MODE_OK and
   TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

static bool
pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))

/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from the text segment to a stack location
   and then patched by pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */
static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      fputs ("\tldw 36(%r22),%r21\n", f);
      fputs ("\tbb,>=,n %r21,30,.+16\n", f);
      if (ASSEMBLER_DIALECT == 0)
        fputs ("\tdepi 0,31,2,%r21\n", f);
      else
        fputs ("\tdepwi 0,31,2,%r21\n", f);
      fputs ("\tldw 4(%r21),%r19\n", f);
      fputs ("\tldw 0(%r21),%r21\n", f);
      if (TARGET_PA_20)
        {
          fputs ("\tbve (%r21)\n", f);
          fputs ("\tldw 40(%r22),%r29\n", f);
          fputs ("\t.word 0\n", f);
          fputs ("\t.word 0\n", f);
        }
      else
        {
          fputs ("\tldsid (%r21),%r1\n", f);
          fputs ("\tmtsp %r1,%sr0\n", f);
          fputs ("\tbe 0(%sr0,%r21)\n", f);
          fputs ("\tldw 40(%r22),%r29\n", f);
        }
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia %r31\n", f);
      fputs ("\tldd 24(%r31),%r1\n", f);
      fputs ("\tldd 24(%r1),%r27\n", f);
      fputs ("\tldd 16(%r1),%r1\n", f);
      fputs ("\tbve (%r1)\n", f);
      fputs ("\tldd 32(%r31),%r31\n", f);
      fputs ("\t.dword 0 ; fptr\n", f);
      fputs ("\t.dword 0 ; static link\n", f);
    }
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */
static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
                   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
         they do not accept integer displacements.  We align the
         start and end addresses to the beginning of their respective
         cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
                                             TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
                                    gen_reg_rtx (Pmode),
                                    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
                                                            r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
         they do not accept integer displacements.  We align the
         start and end addresses to the beginning of their respective
         cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
                                             TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
                                    gen_reg_rtx (Pmode),
                                    gen_reg_rtx (Pmode)));
    }

#ifdef HAVE_ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
                     LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
#endif
}
/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
  return addr;
}
static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
  return x;
}
static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument pointer cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -pa_compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
static void
pa_conditional_register_usage (void)
{
  int i;

  if (!TARGET_64BIT && !TARGET_PA_11)
    {
      for (i = 56; i <= FP_REG_LAST; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
      for (i = 33; i < 56; i += 2)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
    {
      for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Target hook for c_mode_for_suffix.  */

static machine_mode
pa_c_mode_for_suffix (char suffix)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      if (suffix == 'q')
        return TFmode;
    }

  return VOIDmode;
}
/* Target hook for function_section.  */

static section *
pa_function_section (tree decl, enum node_frequency freq,
                     bool startup, bool exit)
{
  /* Put functions in text section if target doesn't have named sections.  */
  if (!targetm_common.have_named_sections)
    return text_section;

  /* Force nested functions into the same section as the containing
     function.  */
  if (decl
      && DECL_SECTION_NAME (decl) == NULL
      && DECL_CONTEXT (decl) != NULL_TREE
      && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
      && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
    return function_section (DECL_CONTEXT (decl));

  /* Otherwise, use the default function section.  */
  return default_function_section (decl, freq, startup, exit);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   In 64-bit mode, we reject CONST_DOUBLES.  We also reject CONST_INTS
   that need more than three instructions to load prior to reload.  This
   limit is somewhat arbitrary.  It takes three instructions to load a
   CONST_INT from memory but two are memory accesses.  It may be better
   to increase the allowed range for CONST_INTS.  We may also be able
   to handle CONST_DOUBLES.  */

static bool
pa_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
    return false;

  if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
    return false;

  /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
     legitimate constants.  The other variants can't be handled by
     the move patterns after reload starts.  */
  if (tls_referenced_p (x))
    return false;

  if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (TARGET_64BIT
      && HOST_BITS_PER_WIDE_INT > 32
      && GET_CODE (x) == CONST_INT
      && !reload_in_progress
      && !reload_completed
      && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
      && !pa_cint_ok_for_move (UINTVAL (x)))
    return false;

  if (function_label_operand (x, mode))
    return false;

  return true;
}
/* Implement TARGET_SECTION_TYPE_FLAGS.  */

static unsigned int
pa_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags;

  flags = default_section_type_flags (decl, name, reloc);

  /* Function labels are placed in the constant pool.  This can
     cause a section conflict if decls are put in ".data.rel.ro"
     or ".data.rel.ro.local" using the __attribute__ construct.  */
  if (strcmp (name, ".data.rel.ro") == 0
      || strcmp (name, ".data.rel.ro.local") == 0)
    flags |= SECTION_WRITE | SECTION_RELRO;

  return flags;
}
/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to form a scaled indexed
   memory operand.  As we won't need to canonicalize the operands if
   the REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant-pool, because such addresses can actually be
   output as REG+SMALLINT.  */
static bool
pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
                  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
           || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
          && REG_P (XEXP (x, 0))
          && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
                     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;

  if (GET_CODE (x) == PLUS)
    {
      rtx base, index;

      /* For REG+REG, the base register should be in XEXP (x, 1),
         so check it first.  */
      if (REG_P (XEXP (x, 1))
          && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
                     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
        base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
               && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
                          : REG_OK_FOR_BASE_P (XEXP (x, 0))))
        base = XEXP (x, 0), index = XEXP (x, 1);
      else
        return false;

      if (GET_CODE (index) == CONST_INT)
        {
          if (INT_5_BITS (index))
            return true;

          /* When INT14_OK_STRICT is false, a secondary reload is needed
             to adjust the displacement of SImode and DImode floating point
             instructions but this may fail when the register also needs
             reloading.  So, we return false when STRICT is true.  We
             also reject long displacements for float mode addresses since
             the majority of accesses will use floating point instructions
             that don't support 14-bit offsets.  */
          if (!INT14_OK_STRICT
              && (strict || !(reload_in_progress || reload_completed))
              && mode != QImode
              && mode != HImode)
            return false;

          return base14_operand (index, mode);
        }

      if (!TARGET_DISABLE_INDEXING
          /* Only accept the "canonical" INDEX+BASE operand order
             on targets with non-equivalent space registers.  */
          && (TARGET_NO_SPACE_REGS
              ? REG_P (index)
              : (base == XEXP (x, 1) && REG_P (index)
                 && (reload_completed
                     || (reload_in_progress && HARD_REGISTER_P (base))
                     || REG_POINTER (base))
                 && (reload_completed
                     || (reload_in_progress && HARD_REGISTER_P (index))
                     || !REG_POINTER (index))))
          && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
          && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
                     : REG_OK_FOR_INDEX_P (index))
          && borx_reg_operand (base, Pmode)
          && borx_reg_operand (index, Pmode))
        return true;

      if (!TARGET_DISABLE_INDEXING
          && GET_CODE (index) == MULT
          /* Only accept base operands with the REG_POINTER flag prior to
             reload on targets with non-equivalent space registers.  */
          && (TARGET_NO_SPACE_REGS
              || (base == XEXP (x, 1)
                  && (reload_completed
                      || (reload_in_progress && HARD_REGISTER_P (base))
                      || REG_POINTER (base))))
          && REG_P (XEXP (index, 0))
          && GET_MODE (XEXP (index, 0)) == Pmode
          && MODE_OK_FOR_SCALED_INDEXING_P (mode)
          && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
                     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
          && GET_CODE (XEXP (index, 1)) == CONST_INT
          && INTVAL (XEXP (index, 1))
             == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
          && borx_reg_operand (base, Pmode))
        return true;

      return false;
    }

  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
        y = SUBREG_REG (y);

      if (REG_P (y)
          && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
                     : REG_OK_FOR_BASE_P (y)))
        {
          /* Needed for -fPIC */
          if (mode == Pmode
              && GET_CODE (XEXP (x, 1)) == UNSPEC)
            return true;

          if (!INT14_OK_STRICT
              && (strict || !(reload_in_progress || reload_completed))
              && mode != QImode
              && mode != HImode)
            return false;

          if (CONSTANT_P (XEXP (x, 1)))
            return true;
        }
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}
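
/* Illustrative examples of the cases above (editorial, derived from the
   code): (plus (reg) (const_int 4)) is legitimate whenever the base
   register is acceptable, since the displacement fits in 5 bits;
   (plus (mult (reg) (const_int 4)) (reg)) is a legitimate SImode
   address when indexing is enabled and the base (second) operand is
   known to be a pointer.  */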
/* Look for machine dependent ways to make the invalid address AD a
   valid address.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1      Round up.
        else
          Y = (<large int> & ~mask)                 Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */

rtx
pa_legitimize_reload_address (rtx ad, machine_mode mode,
                              int opnum, int type,
                              int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
          && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
                                      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~mask) + mask + 1;
      else
        newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
          && (GET_MODE_CLASS (mode) == MODE_FLOAT
              || (TARGET_64BIT && (mode) == DImode)))
        newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
        {
          temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
                               GEN_INT (newoffset));
          ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
          push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
                       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                       opnum, (enum reload_type) type);
          return ad;
        }
    }

  return NULL_RTX;
}
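
/* Worked example of the transformation above (editorial, derived from
   the code): for an SFmode access when INT14_OK_STRICT is false, mask
   is 0x1f.  Given memory (X + 0x45), 0x45 & 0x1f == 5 is below halfway
   (16), so Y is rounded down to 0x40 and the address becomes
   memory ((X + 0x40) + 5); X + 0x40 is reloaded into a base register
   and the residual displacement 5 fits in the 5-bit field of the
   floating point loads and stores.  */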
/* Output address vector.  */

void
pa_output_addr_vec (rtx lab, rtx body)
{
  int idx, vlen = XVECLEN (body, 0);

  if (!TARGET_SOM)
    fputs ("\t.align 4\n", asm_out_file);
  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
        (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
/* Output address difference vector.  */

void
pa_output_addr_diff_vec (rtx lab, rtx body)
{
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
        (asm_out_file,
         body,
         CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
         CODE_LABEL_NUMBER (base));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

        cmp_reg = mem;
      label:
        old_reg = cmp_reg;
        seq;
        (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
        if (!success)
          goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
                                       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
                                       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
                           GET_MODE (success), 1, label,
                           profile_probability::guessed_never ());
  return true;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

rtx
pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
        target = gen_reg_rtx (mode);
      if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
        return target;
    }

  return NULL_RTX;
}
/* Implement TARGET_CALLEE_COPIES.  The callee is responsible for copying
   arguments passed by hidden reference in the 32-bit HP runtime.  Users
   can override this behavior for better compatibility with OpenMP at the
   risk of library incompatibilities.  Arguments are always passed by value
   in the 64-bit HP runtime.  */

static bool
pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
                  machine_mode mode ATTRIBUTE_UNUSED,
                  const_tree type ATTRIBUTE_UNUSED,
                  bool named ATTRIBUTE_UNUSED)
{
  return !TARGET_CALLER_COPIES;
}
/* Implement TARGET_HARD_REGNO_NREGS.  */

static unsigned int
pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
{
  return PA_HARD_REGNO_NREGS (regno, mode);
}
/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return PA_HARD_REGNO_MODE_OK (regno, mode);
}
/* Implement TARGET_STARTING_FRAME_OFFSET.

   On the 32-bit ports, we reserve one slot for the previous frame
   pointer and one fill slot.  The fill slot is for compatibility
   with HP compiled programs.  On the 64-bit ports, we reserve one
   slot for the previous frame pointer.  */

static HOST_WIDE_INT
pa_starting_frame_offset (void)
{
  return 8;
}
/* Figure out the size in words of the function argument.  The size
   returned by this function should always be greater than zero because
   we pass variable and zero sized objects by reference.  */

HOST_WIDE_INT
pa_function_arg_size (machine_mode mode, const_tree type)
{
  HOST_WIDE_INT size;

  size = mode != BLKmode ? GET_MODE_SIZE (mode) : int_size_in_bytes (type);
  return CEIL (size, UNITS_PER_WORD);
}

#include "gt-pa.h"