Move pa.h FUNCTION_ARG_SIZE to pa.c (PR83858)
[official-gcc.git] / gcc / config / pa / pa.c
blob 8e7b11aab45e8416788caaaa02e837bd63d99f2d
/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "except.h"
#include "explow.h"
#include "expr.h"
#include "reload.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "opts.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
			     rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *);
static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (scalar_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
static void pa_file_end (void);
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, machine_mode,
			    const_tree, bool);
static pad_direction pa_function_arg_padding (machine_mode, const_tree);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					machine_mode,
					secondary_reload_info *);
static bool pa_secondary_memory_needed (machine_mode,
					reg_class_t, reg_class_t);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
					      machine_mode, int *,
					      const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);
static bool pa_callee_copies (cumulative_args_t, machine_mode,
			      const_tree, bool);
static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
static bool pa_modes_tieable_p (machine_mode, machine_mode);
static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
static HOST_WIDE_INT pa_starting_frame_offset (void);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END pa_file_end

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES pa_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P pa_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef TARGET_STARTING_FRAME_OFFSET
#define TARGET_STARTING_FRAME_OFFSET pa_starting_frame_offset

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
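
/* A minimal standalone sketch (not compiled; for illustration only) of
   the destructive strchr/'\0' patching used above.  decode_reg_name is
   replaced by printf so the demo is self-contained, and the option value
   "r20-r21,fr4-fr31" is a hypothetical example.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char buf[] = "r20-r21,fr4-fr31", *str = buf, *dash, *comma;

  while (1)
    {
      dash = strchr (str, '-');
      *dash = '\0';			/* Split REG1 from REG2.  */
      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';			/* Terminate this REG1-REG2 pair.  */
      printf ("range: %s..%s\n", str, dash + 1);
      *dash = '-';			/* Restore the string for diagnostics.  */
      if (!comma)
	break;
      *comma = ',';
      str = comma + 1;			/* Advance to the next pair.  */
    }
  return 0;
}
#endif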
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
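
/* Usage sketch for the builtins registered above (hypothetical user
   code on an HP-UX long-double target; not part of this file).  */
#if 0
long double x = __builtin_huge_valq ();	    /* Infinity, loaded from memory.  */
long double y = __builtin_fabsq (x);	    /* Expands to a call to _U_Qfabs.  */
long double z = __builtin_copysignq (y, x); /* Calls _U_Qfcopysign.  */
#endif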
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = const_double_from_real_value (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}
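
/* For example (illustrative values): 5000 fits in 14 signed bits (ldo),
   0x12345000 has its low 11 bits clear (ldil), and 0x01f0 is a
   contiguous bit field (zdepi), so each moves in one instruction;
   0x12345678 satisfies none of the three tests and needs two insns.  */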
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
{
  unsigned HOST_WIDE_INT x;

  x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
  return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
}
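
/* Self-contained sketch (not compiled) of the check above, assuming a
   64-bit unsigned long long in place of unsigned HOST_WIDE_INT.  */
#if 0
#include <stdio.h>

static int
ldil_cint_p (unsigned long long ival)
{
  unsigned long long x = ival & (((unsigned long long) -1 << 31) | 0x7ff);
  return x == 0 || x == ((unsigned long long) -1 << 31);
}

int
main (void)
{
  printf ("%d\n", ldil_cint_p (0x12345000ULL));		/* 1: low 11 bits clear.  */
  printf ("%d\n", ldil_cint_p (0x12345678ULL));		/* 0: low 11 bits set.  */
  printf ("%d\n", ldil_cint_p (0xffffffff80000000ULL));	/* 1: sign-stable.  */
  return 0;
}
#endif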
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
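
/* Worked example of the test above (illustrative): for x = 0x01f0, five
   contiguous ones, lsb_mask = x & -x = 0x0010 and
   t = ((0x01f0 >> 4) + 0x0010) & ~0x000f = 0x0020, a power of two, so
   the test passes.  For x = 0x0505 the same computation yields
   t = 0x0051, which is not a power of two, and the test fails.  */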
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
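
/* Self-contained sketch (not compiled) of the contiguity trick shared by
   the two predicates above, using unsigned long long in place of
   unsigned HOST_WIDE_INT.  */
#if 0
#include <stdio.h>

static int
and_mask_p (unsigned long long mask)
{
  mask = ~mask;			/* The zero bits become a run of ones...  */
  mask += mask & -mask;		/* ...which adding its LSB collapses...  */
  return (mask & (mask - 1)) == 0; /* ...to 0 or a power of 2 iff contiguous.  */
}

int
main (void)
{
  /* 0...01...1, 1...10...0 and 1..10..01..1 pass; 0x00ff00ff fails.  */
  printf ("%d %d %d %d\n",
	  and_mask_p (0x00000000000000ffULL),
	  and_mask_p (0xffffffffffffff00ULL),
	  and_mask_p (0xffffffffff0000ffULL),
	  and_mask_p (0x0000000000ff00ffULL));	/* Prints: 1 1 1 0.  */
  return 0;
}
#endif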
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx_insn *insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx_insn *insn;
      rtx tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
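
/* For reference, the SYMBOL_REF case above typically assembles to a
   DLT-indirect load such as (32-bit PIC, illustrative):

	addil	LT'sym,%r19		; HIGH of the DLT slot
	ldw	RT'sym(%r1),%r28	; LO_SUM + UNSPEC_DLTIND14R

   with one more load for function labels to fetch the address of the
   function descriptor.  */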
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, tmp, t1, t2, tp;
  rtx_insn *insn;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Helper for hppa_legitimize_address.  Given X, return true if it
   is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.

   These respectively represent canonical shift-add rtxs or scaled
   memory addresses.  */
static bool
mem_shadd_or_shadd_rtx_p (rtx x)
{
  return ((GET_CODE (x) == ASHIFT
	   || GET_CODE (x) == MULT)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && ((GET_CODE (x) == ASHIFT
	       && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
	      || (GET_CODE (x) == MULT
		  && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
}
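
/* For instance, (mult (reg) (const_int 8)) and (ashift (reg) (const_int 3))
   describe the same scaled address; the callers below convert the MULT
   form to the ASHIFT form with exact_log2 before emitting shift-add
   sequences.  */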
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Note that the addresses passed into hppa_legitimize_address always
   come from a MEM, so we only have to match the MULT form on incoming
   addresses.  But to be future proof we also match the ASHIFT form.

   However, this routine always places those shift-add sequences into
   registers, so we have to generate the ASHIFT form as our output.

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
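
/* Worked example of the transformation above (illustrative numbers):
   for a MODE_INT reference to memory (X + 0x6000), mask = 0x3fff and
   (0x6000 & 0x3fff) = 0x2000, which is >= halfway, so we round up:

	Y = (0x6000 & ~0x3fff) + 0x3fff + 1 = 0x8000
	Z = X + 0x8000
	memory (Z + (0x6000 - 0x8000)) = memory (Z + -0x2000)

   and the displacement -0x2000 fits in 14 bits (-8192 .. 8191), so the
   final reference needs no further fixup and Z is reusable by CSE.  */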
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (mem_shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
      if (GET_CODE (XEXP (x, 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      rtx reg1, reg2;
      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg2,
						      GEN_INT (shift_val)),
				      reg1));
    }

  /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
      && (mode == SFmode || mode == DFmode))
    {
      int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));

      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      /* Try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_ASHIFT (Pmode,
					      XEXP (XEXP (XEXP (x, 0), 0), 0),
					      GEN_INT (shift_val)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));
	  val /= (1 << shift_val);

	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	    (Pmode, gen_rtx_PLUS (Pmode,
				  gen_rtx_ASHIFT (Pmode, reg1,
						  GEN_INT (shift_val)),
				  base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode,
			    gen_rtx_PLUS (Pmode,
					  gen_rtx_ASHIFT (Pmode, reg2,
							  GEN_INT (shift_val)),
					  reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg1,
						      GEN_INT (shift_val)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (mem_shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_ASHIFT (Pmode,
							 reg2,
							 GEN_INT (shift_val)),
					 reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_ASHIFT (Pmode, regx2,
							 GEN_INT (shift_val)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
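
/* E.g. (illustrative): a DImode multiply on a 32-bit target has
   factor = GET_MODE_SIZE (DImode) / 4 = 2, so it is costed at
   2 * 2 * COSTS_N_INSNS (8) when the PA 1.1 FPU multiply (xmpyu) is
   usable, and 2 * 2 * COSTS_N_INSNS (20) when it must be done with a
   millicode call.  */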
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}
1624 /* Emit insns to move operands[1] into operands[0].
1626 Return 1 if we have written out everything that needs to be done to
1627 do the move. Otherwise, return 0 and the caller will emit the move
1628 normally.
1630 Note SCRATCH_REG may not be in the proper mode depending on how it
1631 will be used. This routine is responsible for creating a new copy
1632 of SCRATCH_REG in the proper mode. */
1635 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1637 register rtx operand0 = operands[0];
1638 register rtx operand1 = operands[1];
1639 register rtx tem;
1641 /* We can only handle indexed addresses in the destination operand
1642 of floating point stores. Thus, we need to break out indexed
1643 addresses from the destination operand. */
1644 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1646 gcc_assert (can_create_pseudo_p ());
1648 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1649 operand0 = replace_equiv_address (operand0, tem);
1652 /* On targets with non-equivalent space registers, break out unscaled
1653 indexed addresses from the source operand before the final CSE.
1654 We have to do this because the REG_POINTER flag is not correctly
1655 carried through various optimization passes and CSE may substitute
1656 a pseudo without the pointer set for one with the pointer set. As
1657 a result, we loose various opportunities to create insns with
1658 unscaled indexed addresses. */
1659 if (!TARGET_NO_SPACE_REGS
1660 && !cse_not_expected
1661 && GET_CODE (operand1) == MEM
1662 && GET_CODE (XEXP (operand1, 0)) == PLUS
1663 && REG_P (XEXP (XEXP (operand1, 0), 0))
1664 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1665 operand1
1666 = replace_equiv_address (operand1,
1667 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1669 if (scratch_reg
1670 && reload_in_progress && GET_CODE (operand0) == REG
1671 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1672 operand0 = reg_equiv_mem (REGNO (operand0));
1673 else if (scratch_reg
1674 && reload_in_progress && GET_CODE (operand0) == SUBREG
1675 && GET_CODE (SUBREG_REG (operand0)) == REG
1676 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1678 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1679 the code which tracks sets/uses for delete_output_reload. */
1680 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1681 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1682 SUBREG_BYTE (operand0));
1683 operand0 = alter_subreg (&temp, true);
1686 if (scratch_reg
1687 && reload_in_progress && GET_CODE (operand1) == REG
1688 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1689 operand1 = reg_equiv_mem (REGNO (operand1));
1690 else if (scratch_reg
1691 && reload_in_progress && GET_CODE (operand1) == SUBREG
1692 && GET_CODE (SUBREG_REG (operand1)) == REG
1693 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1695 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1696 the code which tracks sets/uses for delete_output_reload. */
1697 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1698 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1699 SUBREG_BYTE (operand1));
1700 operand1 = alter_subreg (&temp, true);
1703 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1704 && ((tem = find_replacement (&XEXP (operand0, 0)))
1705 != XEXP (operand0, 0)))
1706 operand0 = replace_equiv_address (operand0, tem);
1708 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1709 && ((tem = find_replacement (&XEXP (operand1, 0)))
1710 != XEXP (operand1, 0)))
1711 operand1 = replace_equiv_address (operand1, tem);
1713 /* Handle secondary reloads for loads/stores of FP registers from
1714 REG+D addresses where D does not fit in 5 or 14 bits, including
1715 (subreg (mem (addr))) cases, and reloads for other unsupported
1716 memory operands. */
1717 if (scratch_reg
1718 && FP_REG_P (operand0)
1719 && (MEM_P (operand1)
1720 || (GET_CODE (operand1) == SUBREG
1721 && MEM_P (XEXP (operand1, 0)))))
1723 rtx op1 = operand1;
1725 if (GET_CODE (op1) == SUBREG)
1726 op1 = XEXP (op1, 0);
1728 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1730 if (!(TARGET_PA_20
1731 && !TARGET_ELF32
1732 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1733 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1735 /* SCRATCH_REG will hold an address and maybe the actual data.
1736 We want it in WORD_MODE regardless of what mode it was
1737 originally given to us. */
1738 scratch_reg = force_mode (word_mode, scratch_reg);
1740 /* D might not fit in 14 bits either; for such cases load D
1741 into scratch reg. */
1742 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1744 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1745 emit_move_insn (scratch_reg,
1746 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1747 Pmode,
1748 XEXP (XEXP (op1, 0), 0),
1749 scratch_reg));
1751 else
1752 emit_move_insn (scratch_reg, XEXP (op1, 0));
1753 emit_insn (gen_rtx_SET (operand0,
1754 replace_equiv_address (op1, scratch_reg)));
1755 return 1;
1758 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1759 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1760 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1762 /* Load memory address into SCRATCH_REG. */
1763 scratch_reg = force_mode (word_mode, scratch_reg);
1764 emit_move_insn (scratch_reg, XEXP (op1, 0));
1765 emit_insn (gen_rtx_SET (operand0,
1766 replace_equiv_address (op1, scratch_reg)));
1767 return 1;
1770 else if (scratch_reg
1771 && FP_REG_P (operand1)
1772 && (MEM_P (operand0)
1773 || (GET_CODE (operand0) == SUBREG
1774 && MEM_P (XEXP (operand0, 0)))))
1776 rtx op0 = operand0;
1778 if (GET_CODE (op0) == SUBREG)
1779 op0 = XEXP (op0, 0);
1781 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1783 if (!(TARGET_PA_20
1784 && !TARGET_ELF32
1785 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1786 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1788 /* SCRATCH_REG will hold an address and maybe the actual data.
1789 We want it in WORD_MODE regardless of what mode it was
1790 originally given to us. */
1791 scratch_reg = force_mode (word_mode, scratch_reg);
1793 /* D might not fit in 14 bits either; for such cases load D
1794 into scratch reg. */
1795 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1797 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1798 emit_move_insn (scratch_reg,
1799 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1800 Pmode,
1801 XEXP (XEXP (op0, 0), 0),
1802 scratch_reg));
1804 else
1805 emit_move_insn (scratch_reg, XEXP (op0, 0));
1806 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1807 operand1));
1808 return 1;
1811 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1812 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1813 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1815 /* Load memory address into SCRATCH_REG. */
1816 scratch_reg = force_mode (word_mode, scratch_reg);
1817 emit_move_insn (scratch_reg, XEXP (op0, 0));
1818 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1819 operand1));
1820 return 1;
1823 /* Handle secondary reloads for loads of FP registers from constant
1824 expressions by forcing the constant into memory. For the most part,
1825 this is only necessary for SImode and DImode.
1827 Use scratch_reg to hold the address of the memory location. */
1828 else if (scratch_reg
1829 && CONSTANT_P (operand1)
1830 && FP_REG_P (operand0))
1832 rtx const_mem, xoperands[2];
1834 if (operand1 == CONST0_RTX (mode))
1836 emit_insn (gen_rtx_SET (operand0, operand1));
1837 return 1;
1840 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1841 it in WORD_MODE regardless of what mode it was originally given
1842 to us. */
1843 scratch_reg = force_mode (word_mode, scratch_reg);
1845 /* Force the constant into memory and put the address of the
1846 memory location into scratch_reg. */
1847 const_mem = force_const_mem (mode, operand1);
1848 xoperands[0] = scratch_reg;
1849 xoperands[1] = XEXP (const_mem, 0);
1850 pa_emit_move_sequence (xoperands, Pmode, 0);
1852 /* Now load the destination register. */
1853 emit_insn (gen_rtx_SET (operand0,
1854 replace_equiv_address (const_mem, scratch_reg)));
1855 return 1;
1857 /* Handle secondary reloads for SAR. These occur when trying to load
1858 the SAR from memory or a constant. */
1859 else if (scratch_reg
1860 && GET_CODE (operand0) == REG
1861 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1862 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1863 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1865 /* D might not fit in 14 bits either; for such cases load D into
1866 scratch reg. */
1867 if (GET_CODE (operand1) == MEM
1868 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1870 /* We are reloading the address into the scratch register, so we
1871 want to make sure the scratch register is a full register. */
1872 scratch_reg = force_mode (word_mode, scratch_reg);
1874 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1875 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1876 0)),
1877 Pmode,
1878 XEXP (XEXP (operand1, 0), 0),
1880 scratch_reg));
1882 /* Now we are going to load the scratch register from memory;
1883 we want to load it in the same width as the original MEM,
1884 which must be the same as the width of the ultimate destination,
1885 OPERAND0. */
1886 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1888 emit_move_insn (scratch_reg,
1889 replace_equiv_address (operand1, scratch_reg));
1891 else
1893 /* We want to load the scratch register using the same mode as
1894 the ultimate destination. */
1895 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1897 emit_move_insn (scratch_reg, operand1);
1900 /* And emit the insn to set the ultimate destination. We know that
1901 the scratch register has the same mode as the destination at this
1902 point. */
1903 emit_move_insn (operand0, scratch_reg);
1904 return 1;
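/* Illustrative sketch, not part of the original source: the signed
   range checks that drive the secondary reloads above.  A displacement
   D fits an N-bit signed field when -2^(N-1) <= D < 2^(N-1), i.e.
   -16..15 for the short 5-bit FP forms and -8192..8191 for the 14-bit
   forms allowed on TARGET_PA_20 without TARGET_ELF32.  */
#if 0
HOST_WIDE_INT d = 100;
int fits_5_bits = (d >= -16 && d < 16);	/* short FP displacement */
int fits_14_bits = (d >= -8192 && d < 8192);	/* long displacement */
#endif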
1907 /* Handle the most common case: storing into a register. */
1908 if (register_operand (operand0, mode))
1910 /* Legitimize TLS symbol references. This happens for references
1911 that aren't legitimate constants. */
1912 if (PA_SYMBOL_REF_TLS_P (operand1))
1913 operand1 = legitimize_tls_address (operand1);
1915 if (register_operand (operand1, mode)
1916 || (GET_CODE (operand1) == CONST_INT
1917 && pa_cint_ok_for_move (UINTVAL (operand1)))
1918 || (operand1 == CONST0_RTX (mode))
1919 || (GET_CODE (operand1) == HIGH
1920 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1921 /* Only `general_operands' can come here, so MEM is ok. */
1922 || GET_CODE (operand1) == MEM)
1924 /* Various sets are created during RTL generation which don't
1925 have the REG_POINTER flag correctly set. After the CSE pass,
1926 instruction recognition can fail if we don't consistently
1927 set this flag when performing register copies. This should
1928 also improve the opportunities for creating insns that use
1929 unscaled indexing. */
1930 if (REG_P (operand0) && REG_P (operand1))
1932 if (REG_POINTER (operand1)
1933 && !REG_POINTER (operand0)
1934 && !HARD_REGISTER_P (operand0))
1935 copy_reg_pointer (operand0, operand1);
1938 /* When MEMs are broken out, the REG_POINTER flag doesn't
1939 get set. In some cases, we can set the REG_POINTER flag
1940 from the declaration for the MEM. */
1941 if (REG_P (operand0)
1942 && GET_CODE (operand1) == MEM
1943 && !REG_POINTER (operand0))
1945 tree decl = MEM_EXPR (operand1);
1947 /* Set the register pointer flag and register alignment
1948 if the declaration for this memory reference is a
1949 pointer type. */
1950 if (decl)
1952 tree type;
1954 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1955 tree operand 1. */
1956 if (TREE_CODE (decl) == COMPONENT_REF)
1957 decl = TREE_OPERAND (decl, 1);
1959 type = TREE_TYPE (decl);
1960 type = strip_array_types (type);
1962 if (POINTER_TYPE_P (type))
1963 mark_reg_pointer (operand0, BITS_PER_UNIT);
1967 emit_insn (gen_rtx_SET (operand0, operand1));
1968 return 1;
1971 else if (GET_CODE (operand0) == MEM)
1973 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1974 && !(reload_in_progress || reload_completed))
1976 rtx temp = gen_reg_rtx (DFmode);
1978 emit_insn (gen_rtx_SET (temp, operand1));
1979 emit_insn (gen_rtx_SET (operand0, temp));
1980 return 1;
1982 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1984 /* Run this case quickly. */
1985 emit_insn (gen_rtx_SET (operand0, operand1));
1986 return 1;
1988 if (! (reload_in_progress || reload_completed))
1990 operands[0] = validize_mem (operand0);
1991 operands[1] = operand1 = force_reg (mode, operand1);
1995 /* Simplify the source if we need to.
1996 Note we do have to handle function labels here, even though we do
1997 not consider them legitimate constants. Loop optimizations can
1998 call the emit_move_xxx routines with one as a source. */
1999 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
2000 || (GET_CODE (operand1) == HIGH
2001 && symbolic_operand (XEXP (operand1, 0), mode))
2002 || function_label_operand (operand1, VOIDmode)
2003 || tls_referenced_p (operand1))
2005 int ishighonly = 0;
2007 if (GET_CODE (operand1) == HIGH)
2009 ishighonly = 1;
2010 operand1 = XEXP (operand1, 0);
2012 if (symbolic_operand (operand1, mode))
2014 /* Argh. The assembler and linker can't handle arithmetic
2015 involving plabels.
2017 So we force the plabel into memory, load operand0 from
2018 the memory location, then add in the constant part. */
2019 if ((GET_CODE (operand1) == CONST
2020 && GET_CODE (XEXP (operand1, 0)) == PLUS
2021 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
2022 VOIDmode))
2023 || function_label_operand (operand1, VOIDmode))
2025 rtx temp, const_part;
2027 /* Figure out what (if any) scratch register to use. */
2028 if (reload_in_progress || reload_completed)
2030 scratch_reg = scratch_reg ? scratch_reg : operand0;
2031 /* SCRATCH_REG will hold an address and maybe the actual
2032 data. We want it in WORD_MODE regardless of what mode it
2033 was originally given to us. */
2034 scratch_reg = force_mode (word_mode, scratch_reg);
2036 else if (flag_pic)
2037 scratch_reg = gen_reg_rtx (Pmode);
2039 if (GET_CODE (operand1) == CONST)
2041 /* Save away the constant part of the expression. */
2042 const_part = XEXP (XEXP (operand1, 0), 1);
2043 gcc_assert (GET_CODE (const_part) == CONST_INT);
2045 /* Force the function label into memory. */
2046 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2048 else
2050 /* No constant part. */
2051 const_part = NULL_RTX;
2053 /* Force the function label into memory. */
2054 temp = force_const_mem (mode, operand1);
2058 /* Get the address of the memory location. PIC-ify it if
2059 necessary. */
2060 temp = XEXP (temp, 0);
2061 if (flag_pic)
2062 temp = legitimize_pic_address (temp, mode, scratch_reg);
2064 /* Put the address of the memory location into our destination
2065 register. */
2066 operands[1] = temp;
2067 pa_emit_move_sequence (operands, mode, scratch_reg);
2069 /* Now load from the memory location into our destination
2070 register. */
2071 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2072 pa_emit_move_sequence (operands, mode, scratch_reg);
2074 /* And add back in the constant part. */
2075 if (const_part != NULL_RTX)
2076 expand_inc (operand0, const_part);
2078 return 1;
2081 if (flag_pic)
2083 rtx_insn *insn;
2084 rtx temp;
2086 if (reload_in_progress || reload_completed)
2088 temp = scratch_reg ? scratch_reg : operand0;
2089 /* TEMP will hold an address and maybe the actual
2090 data. We want it in WORD_MODE regardless of what mode it
2091 was originally given to us. */
2092 temp = force_mode (word_mode, temp);
2094 else
2095 temp = gen_reg_rtx (Pmode);
2097 /* Force (const (plus (symbol) (const_int))) to memory
2098 if the const_int will not fit in 14 bits. Although
2099 this requires a relocation, the instruction sequence
2100 needed to load the value is shorter. */
2101 if (GET_CODE (operand1) == CONST
2102 && GET_CODE (XEXP (operand1, 0)) == PLUS
2103 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2104 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2106 rtx x, m = force_const_mem (mode, operand1);
2108 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2109 x = replace_equiv_address (m, x);
2110 insn = emit_move_insn (operand0, x);
2112 else
2114 operands[1] = legitimize_pic_address (operand1, mode, temp);
2115 if (REG_P (operand0) && REG_P (operands[1]))
2116 copy_reg_pointer (operand0, operands[1]);
2117 insn = emit_move_insn (operand0, operands[1]);
2120 /* Put a REG_EQUAL note on this insn. */
2121 set_unique_reg_note (insn, REG_EQUAL, operand1);
2123 /* On the HPPA, references to data space are supposed to use dp,
2124 register 27, but showing it in the RTL inhibits various cse
2125 and loop optimizations. */
2126 else
2128 rtx temp, set;
2130 if (reload_in_progress || reload_completed)
2132 temp = scratch_reg ? scratch_reg : operand0;
2133 /* TEMP will hold an address and maybe the actual
2134 data. We want it in WORD_MODE regardless of what mode it
2135 was originally given to us. */
2136 temp = force_mode (word_mode, temp);
2138 else
2139 temp = gen_reg_rtx (mode);
2141 /* Loading a SYMBOL_REF into a register makes that register
2142 safe to be used as the base in an indexed address.
2144 Don't mark hard registers though. That loses. */
2145 if (GET_CODE (operand0) == REG
2146 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2147 mark_reg_pointer (operand0, BITS_PER_UNIT);
2148 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2149 mark_reg_pointer (temp, BITS_PER_UNIT);
2151 if (ishighonly)
2152 set = gen_rtx_SET (operand0, temp);
2153 else
2154 set = gen_rtx_SET (operand0,
2155 gen_rtx_LO_SUM (mode, temp, operand1));
2157 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2158 emit_insn (set);
2161 return 1;
2163 else if (tls_referenced_p (operand1))
2165 rtx tmp = operand1;
2166 rtx addend = NULL;
2168 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2170 addend = XEXP (XEXP (tmp, 0), 1);
2171 tmp = XEXP (XEXP (tmp, 0), 0);
2174 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2175 tmp = legitimize_tls_address (tmp);
2176 if (addend)
2178 tmp = gen_rtx_PLUS (mode, tmp, addend);
2179 tmp = force_operand (tmp, operands[0]);
2181 operands[1] = tmp;
2183 else if (GET_CODE (operand1) != CONST_INT
2184 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2186 rtx temp;
2187 rtx_insn *insn;
2188 rtx op1 = operand1;
2189 HOST_WIDE_INT value = 0;
2190 HOST_WIDE_INT insv = 0;
2191 int insert = 0;
2193 if (GET_CODE (operand1) == CONST_INT)
2194 value = INTVAL (operand1);
2196 if (TARGET_64BIT
2197 && GET_CODE (operand1) == CONST_INT
2198 && HOST_BITS_PER_WIDE_INT > 32
2199 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2201 HOST_WIDE_INT nval;
2203 /* Extract the low order 32 bits of the value and sign extend.
2204 If the new value is the same as the original value, we can
2205 use the original value as-is. If the new value is
2206 different, we use it and insert the most-significant 32-bits
2207 of the original value into the final result. */
2208 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2209 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2210 if (value != nval)
2212 #if HOST_BITS_PER_WIDE_INT > 32
2213 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2214 #endif
2215 insert = 1;
2216 value = nval;
2217 operand1 = GEN_INT (nval);
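/* Worked example (illustrative): for value = 0x0000000180000000, the
   low 32 bits sign extend to nval = 0xffffffff80000000, so value != nval;
   we continue with operand1 = nval and set insv = 1 (the original high
   32 bits) for the insertion step below.  */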
2221 if (reload_in_progress || reload_completed)
2222 temp = scratch_reg ? scratch_reg : operand0;
2223 else
2224 temp = gen_reg_rtx (mode);
2226 /* We don't directly split DImode constants on 32-bit targets
2227 because PLUS uses an 11-bit immediate and the insn sequence
2228 generated is not as efficient as the one using HIGH/LO_SUM. */
2229 if (GET_CODE (operand1) == CONST_INT
2230 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2231 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2232 && !insert)
2234 /* Directly break constant into high and low parts. This
2235 provides better optimization opportunities because various
2236 passes recognize constants split with PLUS but not LO_SUM.
2237 We use a 14-bit signed low part except when the addition
2238 of 0x4000 to the high part might change the sign of the
2239 high part. */
2240 HOST_WIDE_INT low = value & 0x3fff;
2241 HOST_WIDE_INT high = value & ~ 0x3fff;
2243 if (low >= 0x2000)
2245 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2246 high += 0x2000;
2247 else
2248 high += 0x4000;
2251 low = value - high;
2253 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2254 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2256 else
2258 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2259 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2262 insn = emit_move_insn (operands[0], operands[1]);
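/* Worked example of the split above (illustrative, not from the
   original source).  */
#if 0
HOST_WIDE_INT value = 0x12346789;
HOST_WIDE_INT low = value & 0x3fff;	/* 0x2789, which is >= 0x2000 */
HOST_WIDE_INT high = value & ~ 0x3fff;	/* 0x12344000 */
if (low >= 0x2000)
  high += 0x4000;			/* high becomes 0x12348000 */
low = value - high;			/* -0x1877, a 14-bit signed value */
#endif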
2264 /* Now insert the most significant 32 bits of the value
2265 into the register. When we don't have a second register
2266 available, it could take up to nine instructions to load
2267 a 64-bit integer constant. Prior to reload, we force
2268 constants that would take more than three instructions
2269 to load to the constant pool. During and after reload,
2270 we have to handle all possible values. */
2271 if (insert)
2273 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2274 register and the value to be inserted is outside the
2275 range that can be loaded with three depdi instructions. */
2276 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2278 operand1 = GEN_INT (insv);
2280 emit_insn (gen_rtx_SET (temp,
2281 gen_rtx_HIGH (mode, operand1)));
2282 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2283 if (mode == DImode)
2284 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2285 const0_rtx, temp));
2286 else
2287 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2288 const0_rtx, temp));
2290 else
2292 int len = 5, pos = 27;
2294 /* Insert the bits using the depdi instruction. */
2295 while (pos >= 0)
2297 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2298 HOST_WIDE_INT sign = v5 < 0;
2300 /* Left extend the insertion. */
2301 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2302 while (pos > 0 && (insv & 1) == sign)
2304 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2305 len += 1;
2306 pos -= 1;
2309 if (mode == DImode)
2310 insn = emit_insn (gen_insvdi (operand0,
2311 GEN_INT (len),
2312 GEN_INT (pos),
2313 GEN_INT (v5)));
2314 else
2315 insn = emit_insn (gen_insvsi (operand0,
2316 GEN_INT (len),
2317 GEN_INT (pos),
2318 GEN_INT (v5)));
2320 len = pos > 0 && pos < 5 ? pos : 5;
2321 pos -= len;
2326 set_unique_reg_note (insn, REG_EQUAL, op1);
2328 return 1;
2331 /* Now have insn-emit do whatever it normally does. */
2332 return 0;
2335 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2336 it will need a link/runtime reloc). */
2338 int
2339 pa_reloc_needed (tree exp)
2341 int reloc = 0;
2343 switch (TREE_CODE (exp))
2345 case ADDR_EXPR:
2346 return 1;
2348 case POINTER_PLUS_EXPR:
2349 case PLUS_EXPR:
2350 case MINUS_EXPR:
2351 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2352 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2353 break;
2355 CASE_CONVERT:
2356 case NON_LVALUE_EXPR:
2357 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2358 break;
2360 case CONSTRUCTOR:
2362 tree value;
2363 unsigned HOST_WIDE_INT ix;
2365 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2366 if (value)
2367 reloc |= pa_reloc_needed (value);
2369 break;
2371 case ERROR_MARK:
2372 break;
2374 default:
2375 break;
2377 return reloc;
2381 /* Return the best assembler insn template
2382 for moving operands[1] into operands[0] as a fullword. */
2383 const char *
2384 pa_singlemove_string (rtx *operands)
2386 HOST_WIDE_INT intval;
2388 if (GET_CODE (operands[0]) == MEM)
2389 return "stw %r1,%0";
2390 if (GET_CODE (operands[1]) == MEM)
2391 return "ldw %1,%0";
2392 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2394 long i;
2396 gcc_assert (GET_MODE (operands[1]) == SFmode);
2398 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2399 bit pattern. */
2400 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2402 operands[1] = GEN_INT (i);
2403 /* Fall through to CONST_INT case. */
2405 if (GET_CODE (operands[1]) == CONST_INT)
2407 intval = INTVAL (operands[1]);
2409 if (VAL_14_BITS_P (intval))
2410 return "ldi %1,%0";
2411 else if ((intval & 0x7ff) == 0)
2412 return "ldil L'%1,%0";
2413 else if (pa_zdepi_cint_p (intval))
2414 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2415 else
2416 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2418 return "copy %1,%0";
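/* Examples (illustrative): 0x1234 fits in 14 bits, so "ldi" suffices;
   0x55550000 has its low 11 bits clear, so "ldil L'" suffices; a
   contiguous bit mask such as 0x0003ff00 satisfies pa_zdepi_cint_p and
   uses the deposit form; anything else needs the two-insn
   "ldil"/"ldo" sequence.  */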
2422 /* Compute position (in OP[1]) and width (in OP[2])
2423 useful for copying IMM to a register using the zdepi
2424 instructions. Store the immediate value to insert in OP[0]. */
2425 static void
2426 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2428 int lsb, len;
2430 /* Find the least significant set bit in IMM. */
2431 for (lsb = 0; lsb < 32; lsb++)
2433 if ((imm & 1) != 0)
2434 break;
2435 imm >>= 1;
2438 /* Choose variants based on *sign* of the 5-bit field. */
2439 if ((imm & 0x10) == 0)
2440 len = (lsb <= 28) ? 4 : 32 - lsb;
2441 else
2443 /* Find the width of the bitstring in IMM. */
2444 for (len = 5; len < 32 - lsb; len++)
2446 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2447 break;
2450 /* Sign extend IMM as a 5-bit value. */
2451 imm = (imm & 0xf) - 0x10;
2454 op[0] = imm;
2455 op[1] = 31 - lsb;
2456 op[2] = len;
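/* Worked example (illustrative): imm = 0x00fff000.  The least
   significant set bit is bit 12 and the run of ones is 12 bits wide;
   the shifted value 0xfff sign extends (as a 5-bit field) to -1, so
   op = {-1, 31 - 12 = 19, 12}, regenerating the 12-bit run of ones
   starting at bit 12.  */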
2459 /* Compute position (in OP[1]) and width (in OP[2])
2460 useful for copying IMM to a register using the depdi,z
2461 instructions. Store the immediate value to insert in OP[0]. */
2463 static void
2464 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2466 int lsb, len, maxlen;
2468 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2470 /* Find the least significant set bit in IMM. */
2471 for (lsb = 0; lsb < maxlen; lsb++)
2473 if ((imm & 1) != 0)
2474 break;
2475 imm >>= 1;
2478 /* Choose variants based on *sign* of the 5-bit field. */
2479 if ((imm & 0x10) == 0)
2480 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2481 else
2483 /* Find the width of the bitstring in IMM. */
2484 for (len = 5; len < maxlen - lsb; len++)
2486 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2487 break;
2490 /* Extend length if host is narrow and IMM is negative. */
2491 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2492 len += 32;
2494 /* Sign extend IMM as a 5-bit value. */
2495 imm = (imm & 0xf) - 0x10;
2498 op[0] = imm;
2499 op[1] = 63 - lsb;
2500 op[2] = len;
2503 /* Output assembler code to perform a doubleword move insn
2504 with operands OPERANDS. */
2506 const char *
2507 pa_output_move_double (rtx *operands)
2509 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2510 rtx latehalf[2];
2511 rtx addreg0 = 0, addreg1 = 0;
2512 int highonly = 0;
2514 /* First classify both operands. */
2516 if (REG_P (operands[0]))
2517 optype0 = REGOP;
2518 else if (offsettable_memref_p (operands[0]))
2519 optype0 = OFFSOP;
2520 else if (GET_CODE (operands[0]) == MEM)
2521 optype0 = MEMOP;
2522 else
2523 optype0 = RNDOP;
2525 if (REG_P (operands[1]))
2526 optype1 = REGOP;
2527 else if (CONSTANT_P (operands[1]))
2528 optype1 = CNSTOP;
2529 else if (offsettable_memref_p (operands[1]))
2530 optype1 = OFFSOP;
2531 else if (GET_CODE (operands[1]) == MEM)
2532 optype1 = MEMOP;
2533 else
2534 optype1 = RNDOP;
2536 /* Check for the cases that the operand constraints are not
2537 supposed to allow. */
2538 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2540 /* Handle copies between general and floating registers. */
2542 if (optype0 == REGOP && optype1 == REGOP
2543 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2545 if (FP_REG_P (operands[0]))
2547 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2548 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2549 return "{fldds|fldd} -16(%%sp),%0";
2551 else
2553 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2554 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2555 return "{ldws|ldw} -12(%%sp),%R0";
2559 /* Handle auto decrementing and incrementing loads and stores
2560 specifically, since the structure of the function doesn't work
2561 for them without major modification. Do it better when we teach
2562 this port about the general inc/dec addressing of PA.
2563 (This was written by tege. Chide him if it doesn't work.) */
2565 if (optype0 == MEMOP)
2567 /* We have to output the address syntax ourselves, since print_operand
2568 doesn't deal with the addresses we want to use. Fix this later. */
2570 rtx addr = XEXP (operands[0], 0);
2571 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2573 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2575 operands[0] = XEXP (addr, 0);
2576 gcc_assert (GET_CODE (operands[1]) == REG
2577 && GET_CODE (operands[0]) == REG);
2579 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2581 /* No overlap between high target register and address
2582 register. (We do this in a non-obvious way to
2583 save a register file writeback) */
2584 if (GET_CODE (addr) == POST_INC)
2585 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2586 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2588 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2590 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2592 operands[0] = XEXP (addr, 0);
2593 gcc_assert (GET_CODE (operands[1]) == REG
2594 && GET_CODE (operands[0]) == REG);
2596 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2597 /* No overlap between high target register and address
2598 register. (We do this in a non-obvious way to save a
2599 register file writeback) */
2600 if (GET_CODE (addr) == PRE_INC)
2601 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2602 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2605 if (optype1 == MEMOP)
2607 /* We have to output the address syntax ourselves, since print_operand
2608 doesn't deal with the addresses we want to use. Fix this later. */
2610 rtx addr = XEXP (operands[1], 0);
2611 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2613 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2615 operands[1] = XEXP (addr, 0);
2616 gcc_assert (GET_CODE (operands[0]) == REG
2617 && GET_CODE (operands[1]) == REG);
2619 if (!reg_overlap_mentioned_p (high_reg, addr))
2621 /* No overlap between high target register and address
2622 register. (We do this in a non-obvious way to
2623 save a register file writeback) */
2624 if (GET_CODE (addr) == POST_INC)
2625 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2626 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2628 else
2630 /* This is an undefined situation. We should load into the
2631 address register *and* update that register. Probably
2632 we don't need to handle this at all. */
2633 if (GET_CODE (addr) == POST_INC)
2634 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2635 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2638 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2640 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2642 operands[1] = XEXP (addr, 0);
2643 gcc_assert (GET_CODE (operands[0]) == REG
2644 && GET_CODE (operands[1]) == REG);
2646 if (!reg_overlap_mentioned_p (high_reg, addr))
2648 /* No overlap between high target register and address
2649 register. (We do this in a non-obvious way to
2650 save a register file writeback) */
2651 if (GET_CODE (addr) == PRE_INC)
2652 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2653 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2655 else
2657 /* This is an undefined situation. We should load into the
2658 address register *and* update that register. Probably
2659 we don't need to handle this at all. */
2660 if (GET_CODE (addr) == PRE_INC)
2661 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2662 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2665 else if (GET_CODE (addr) == PLUS
2666 && GET_CODE (XEXP (addr, 0)) == MULT)
2668 rtx xoperands[4];
2670 /* Load address into left half of destination register. */
2671 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2672 xoperands[1] = XEXP (addr, 1);
2673 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2674 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2675 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2676 xoperands);
2677 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2679 else if (GET_CODE (addr) == PLUS
2680 && REG_P (XEXP (addr, 0))
2681 && REG_P (XEXP (addr, 1)))
2683 rtx xoperands[3];
2685 /* Load address into left half of destination register. */
2686 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2687 xoperands[1] = XEXP (addr, 0);
2688 xoperands[2] = XEXP (addr, 1);
2689 output_asm_insn ("{addl|add,l} %1,%2,%0",
2690 xoperands);
2691 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2695 /* If an operand is an unoffsettable memory ref, find a register
2696 we can increment temporarily to make it refer to the second word. */
2698 if (optype0 == MEMOP)
2699 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2701 if (optype1 == MEMOP)
2702 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2704 /* Ok, we can do one word at a time.
2705 Normally we do the low-numbered word first.
2707 In either case, set up in LATEHALF the operands to use
2708 for the high-numbered word and in some cases alter the
2709 operands in OPERANDS to be suitable for the low-numbered word. */
2711 if (optype0 == REGOP)
2712 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2713 else if (optype0 == OFFSOP)
2714 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2715 else
2716 latehalf[0] = operands[0];
2718 if (optype1 == REGOP)
2719 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2720 else if (optype1 == OFFSOP)
2721 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2722 else if (optype1 == CNSTOP)
2724 if (GET_CODE (operands[1]) == HIGH)
2726 operands[1] = XEXP (operands[1], 0);
2727 highonly = 1;
2729 split_double (operands[1], &operands[1], &latehalf[1]);
2731 else
2732 latehalf[1] = operands[1];
2734 /* If the first move would clobber the source of the second one,
2735 do them in the other order.
2737 This can happen in two cases:
2739 mem -> register where the first half of the destination register
2740 is the same register used in the memory's address. Reload
2741 can create such insns.
2743 mem in this case will be either register indirect or register
2744 indirect plus a valid offset.
2746 register -> register move where REGNO(dst) == REGNO(src) + 1;
2747 someone (Tim/Tege?) claimed this can happen for parameter loads.
2749 Handle mem -> register case first. */
2750 if (optype0 == REGOP
2751 && (optype1 == MEMOP || optype1 == OFFSOP)
2752 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2754 /* Do the late half first. */
2755 if (addreg1)
2756 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2757 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2759 /* Then clobber. */
2760 if (addreg1)
2761 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2762 return pa_singlemove_string (operands);
2765 /* Now handle register -> register case. */
2766 if (optype0 == REGOP && optype1 == REGOP
2767 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2769 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2770 return pa_singlemove_string (operands);
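/* Example (illustrative): for a register pair move with
   REGNO (dst) == REGNO (src) + 1, say %r5:%r6 <- %r4:%r5, doing the
   low word first would overwrite %r5 while it is still needed as the
   source of the late half; emitting the late half (%r6 <- %r5) first
   avoids the clobber.  */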
2773 /* Normal case: do the two words, low-numbered first. */
2775 output_asm_insn (pa_singlemove_string (operands), operands);
2777 /* Make any unoffsettable addresses point at high-numbered word. */
2778 if (addreg0)
2779 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2780 if (addreg1)
2781 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2783 /* Do high-numbered word. */
2784 if (highonly)
2785 output_asm_insn ("ldil L'%1,%0", latehalf);
2786 else
2787 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2789 /* Undo the adds we just did. */
2790 if (addreg0)
2791 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2792 if (addreg1)
2793 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2795 return "";
2798 const char *
2799 pa_output_fp_move_double (rtx *operands)
2801 if (FP_REG_P (operands[0]))
2803 if (FP_REG_P (operands[1])
2804 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2805 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2806 else
2807 output_asm_insn ("fldd%F1 %1,%0", operands);
2809 else if (FP_REG_P (operands[1]))
2811 output_asm_insn ("fstd%F0 %1,%0", operands);
2813 else
2815 rtx xoperands[2];
2817 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2819 /* This is a pain. You have to be prepared to deal with an
2820 arbitrary address here including pre/post increment/decrement.
2822 So avoid this in the MD. */
2823 gcc_assert (GET_CODE (operands[0]) == REG);
2825 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2826 xoperands[0] = operands[0];
2827 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2829 return "";
2832 /* Return a REG that occurs in ADDR with coefficient 1.
2833 ADDR can be effectively incremented by incrementing REG. */
2835 static rtx
2836 find_addr_reg (rtx addr)
2838 while (GET_CODE (addr) == PLUS)
2840 if (GET_CODE (XEXP (addr, 0)) == REG)
2841 addr = XEXP (addr, 0);
2842 else if (GET_CODE (XEXP (addr, 1)) == REG)
2843 addr = XEXP (addr, 1);
2844 else if (CONSTANT_P (XEXP (addr, 0)))
2845 addr = XEXP (addr, 1);
2846 else if (CONSTANT_P (XEXP (addr, 1)))
2847 addr = XEXP (addr, 0);
2848 else
2849 gcc_unreachable ();
2851 gcc_assert (GET_CODE (addr) == REG);
2852 return addr;
2855 /* Emit code to perform a block move.
2857 OPERANDS[0] is the destination pointer as a REG, clobbered.
2858 OPERANDS[1] is the source pointer as a REG, clobbered.
2859 OPERANDS[2] is a register for temporary storage.
2860 OPERANDS[3] is a register for temporary storage.
2861 OPERANDS[4] is the size as a CONST_INT
2862 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2863 OPERANDS[6] is another temporary register. */
2865 const char *
2866 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2868 int align = INTVAL (operands[5]);
2869 unsigned long n_bytes = INTVAL (operands[4]);
2871 /* We can't move more than a word at a time because the PA
2872 has no integer move insns longer than a word. (Could use fp mem ops?) */
2873 if (align > (TARGET_64BIT ? 8 : 4))
2874 align = (TARGET_64BIT ? 8 : 4);
2876 /* Note that we know each loop below will execute at least twice
2877 (else we would have open-coded the copy). */
2878 switch (align)
2880 case 8:
2881 /* Pre-adjust the loop counter. */
2882 operands[4] = GEN_INT (n_bytes - 16);
2883 output_asm_insn ("ldi %4,%2", operands);
2885 /* Copying loop. */
2886 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2887 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2888 output_asm_insn ("std,ma %3,8(%0)", operands);
2889 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2890 output_asm_insn ("std,ma %6,8(%0)", operands);
2892 /* Handle the residual. There could be up to 15 bytes of
2893 residual to copy! */
2894 if (n_bytes % 16 != 0)
2896 operands[4] = GEN_INT (n_bytes % 8);
2897 if (n_bytes % 16 >= 8)
2898 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2899 if (n_bytes % 8 != 0)
2900 output_asm_insn ("ldd 0(%1),%6", operands);
2901 if (n_bytes % 16 >= 8)
2902 output_asm_insn ("std,ma %3,8(%0)", operands);
2903 if (n_bytes % 8 != 0)
2904 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2906 return "";
2908 case 4:
2909 /* Pre-adjust the loop counter. */
2910 operands[4] = GEN_INT (n_bytes - 8);
2911 output_asm_insn ("ldi %4,%2", operands);
2913 /* Copying loop. */
2914 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2915 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2916 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2917 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2918 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2920 /* Handle the residual. There could be up to 7 bytes of
2921 residual to copy! */
2922 if (n_bytes % 8 != 0)
2924 operands[4] = GEN_INT (n_bytes % 4);
2925 if (n_bytes % 8 >= 4)
2926 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2927 if (n_bytes % 4 != 0)
2928 output_asm_insn ("ldw 0(%1),%6", operands);
2929 if (n_bytes % 8 >= 4)
2930 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2931 if (n_bytes % 4 != 0)
2932 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2934 return "";
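/* Worked example (illustrative): align = 4, n_bytes = 22.  The loop
   counter starts at 14 and each pass through the loop copies 8 bytes
   (two words), so two passes move 16 bytes.  The residual path then
   copies one more word (22 % 8 = 6 >= 4) and finishes with a 2-byte
   "stby,e" (22 % 4 = 2), for 22 bytes in all.  */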
2936 case 2:
2937 /* Pre-adjust the loop counter. */
2938 operands[4] = GEN_INT (n_bytes - 4);
2939 output_asm_insn ("ldi %4,%2", operands);
2941 /* Copying loop. */
2942 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2943 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2944 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2945 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2946 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2948 /* Handle the residual. */
2949 if (n_bytes % 4 != 0)
2951 if (n_bytes % 4 >= 2)
2952 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2953 if (n_bytes % 2 != 0)
2954 output_asm_insn ("ldb 0(%1),%6", operands);
2955 if (n_bytes % 4 >= 2)
2956 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2957 if (n_bytes % 2 != 0)
2958 output_asm_insn ("stb %6,0(%0)", operands);
2960 return "";
2962 case 1:
2963 /* Pre-adjust the loop counter. */
2964 operands[4] = GEN_INT (n_bytes - 2);
2965 output_asm_insn ("ldi %4,%2", operands);
2967 /* Copying loop. */
2968 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2969 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2970 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2971 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2972 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2974 /* Handle the residual. */
2975 if (n_bytes % 2 != 0)
2977 output_asm_insn ("ldb 0(%1),%3", operands);
2978 output_asm_insn ("stb %3,0(%0)", operands);
2980 return "";
2982 default:
2983 gcc_unreachable ();
2987 /* Count the number of insns necessary to handle this block move.
2989 Basic structure is the same as emit_block_move, except that we
2990 count insns rather than emit them. */
2992 static int
2993 compute_movmem_length (rtx_insn *insn)
2995 rtx pat = PATTERN (insn);
2996 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2997 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2998 unsigned int n_insns = 0;
3000 /* We can't move more than a word at a time because the PA
3001 has no integer move insns longer than a word. (Could use fp mem ops?) */
3002 if (align > (TARGET_64BIT ? 8 : 4))
3003 align = (TARGET_64BIT ? 8 : 4);
3005 /* The basic copying loop. */
3006 n_insns = 6;
3008 /* Residuals. */
3009 if (n_bytes % (2 * align) != 0)
3011 if ((n_bytes % (2 * align)) >= align)
3012 n_insns += 2;
3014 if ((n_bytes % align) != 0)
3015 n_insns += 2;
3018 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3019 return n_insns * 4;
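/* Cross-check (illustrative): for align = 4 and n_bytes = 22 as in
   the example above, this gives 6 loop insns, +2 for the word residual
   (22 % 8 >= 4) and +2 for the byte residual (22 % 4 != 0), i.e.
   10 insns or 40 bytes.  */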
3022 /* Emit code to perform a block clear.
3024 OPERANDS[0] is the destination pointer as a REG, clobbered.
3025 OPERANDS[1] is a register for temporary storage.
3026 OPERANDS[2] is the size as a CONST_INT
3027 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3029 const char *
3030 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3032 int align = INTVAL (operands[3]);
3033 unsigned long n_bytes = INTVAL (operands[2]);
3035 /* We can't clear more than a word at a time because the PA
3036 has no integer move insns longer than a word. */
3037 if (align > (TARGET_64BIT ? 8 : 4))
3038 align = (TARGET_64BIT ? 8 : 4);
3040 /* Note that we know each loop below will execute at least twice
3041 (else we would have open-coded the clear). */
3042 switch (align)
3044 case 8:
3045 /* Pre-adjust the loop counter. */
3046 operands[2] = GEN_INT (n_bytes - 16);
3047 output_asm_insn ("ldi %2,%1", operands);
3049 /* Loop. */
3050 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3051 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3052 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3054 /* Handle the residual. There could be up to 15 bytes of
3055 residual to clear! */
3056 if (n_bytes % 16 != 0)
3058 operands[2] = GEN_INT (n_bytes % 8);
3059 if (n_bytes % 16 >= 8)
3060 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3061 if (n_bytes % 8 != 0)
3062 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3064 return "";
3066 case 4:
3067 /* Pre-adjust the loop counter. */
3068 operands[2] = GEN_INT (n_bytes - 8);
3069 output_asm_insn ("ldi %2,%1", operands);
3071 /* Loop. */
3072 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3073 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3074 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3076 /* Handle the residual. There could be up to 7 bytes of
3077 residual to clear! */
3078 if (n_bytes % 8 != 0)
3080 operands[2] = GEN_INT (n_bytes % 4);
3081 if (n_bytes % 8 >= 4)
3082 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3083 if (n_bytes % 4 != 0)
3084 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3086 return "";
3088 case 2:
3089 /* Pre-adjust the loop counter. */
3090 operands[2] = GEN_INT (n_bytes - 4);
3091 output_asm_insn ("ldi %2,%1", operands);
3093 /* Loop. */
3094 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3095 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3096 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3098 /* Handle the residual. */
3099 if (n_bytes % 4 != 0)
3101 if (n_bytes % 4 >= 2)
3102 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3103 if (n_bytes % 2 != 0)
3104 output_asm_insn ("stb %%r0,0(%0)", operands);
3106 return "";
3108 case 1:
3109 /* Pre-adjust the loop counter. */
3110 operands[2] = GEN_INT (n_bytes - 2);
3111 output_asm_insn ("ldi %2,%1", operands);
3113 /* Loop. */
3114 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3115 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3116 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3118 /* Handle the residual. */
3119 if (n_bytes % 2 != 0)
3120 output_asm_insn ("stb %%r0,0(%0)", operands);
3122 return "";
3124 default:
3125 gcc_unreachable ();
3129 /* Count the number of insns necessary to handle this block clear.
3131 Basic structure is the same as emit_block_move, except that we
3132 count insns rather than emit them. */
3134 static int
3135 compute_clrmem_length (rtx_insn *insn)
3137 rtx pat = PATTERN (insn);
3138 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3139 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3140 unsigned int n_insns = 0;
3142 /* We can't clear more than a word at a time because the PA
3143 has no integer move insns longer than a word. */
3144 if (align > (TARGET_64BIT ? 8 : 4))
3145 align = (TARGET_64BIT ? 8 : 4);
3147 /* The basic loop. */
3148 n_insns = 4;
3150 /* Residuals. */
3151 if (n_bytes % (2 * align) != 0)
3153 if ((n_bytes % (2 * align)) >= align)
3154 n_insns++;
3156 if ((n_bytes % align) != 0)
3157 n_insns++;
3160 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3161 return n_insns * 4;
3165 const char *
3166 pa_output_and (rtx *operands)
3168 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3170 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3171 int ls0, ls1, ms0, p, len;
3173 for (ls0 = 0; ls0 < 32; ls0++)
3174 if ((mask & (1 << ls0)) == 0)
3175 break;
3177 for (ls1 = ls0; ls1 < 32; ls1++)
3178 if ((mask & (1 << ls1)) != 0)
3179 break;
3181 for (ms0 = ls1; ms0 < 32; ms0++)
3182 if ((mask & (1 << ms0)) == 0)
3183 break;
3185 gcc_assert (ms0 == 32);
3187 if (ls1 == 32)
3189 len = ls0;
3191 gcc_assert (len);
3193 operands[2] = GEN_INT (len);
3194 return "{extru|extrw,u} %1,31,%2,%0";
3196 else
3198 /* We could use this `depi' for the case above as well, but `depi'
3199 requires one more register file access than an `extru'. */
3201 p = 31 - ls0;
3202 len = ls1 - ls0;
3204 operands[2] = GEN_INT (p);
3205 operands[3] = GEN_INT (len);
3206 return "{depi|depwi} 0,%2,%3,%0";
3209 else
3210 return "and %1,%2,%0";
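/* Examples (illustrative): mask 0x000000ff gives ls0 = 8 and ls1 = 32,
   so the low 8 bits are kept with "extru %1,31,8,%0"; mask 0xffffff0f
   gives ls0 = 4 and ls1 = 8, so bits 4..7 are cleared with
   "depi 0,27,4,%0" (position 31 - 4, length 8 - 4).  */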
3213 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3214 storing the result in operands[0]. */
3215 const char *
3216 pa_output_64bit_and (rtx *operands)
3218 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3220 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3221 int ls0, ls1, ms0, p, len;
3223 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3224 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3225 break;
3227 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3228 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3229 break;
3231 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3232 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3233 break;
3235 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3237 if (ls1 == HOST_BITS_PER_WIDE_INT)
3239 len = ls0;
3241 gcc_assert (len);
3243 operands[2] = GEN_INT (len);
3244 return "extrd,u %1,63,%2,%0";
3246 else
3248 /* We could use this `depdi' for the case above as well, but `depdi'
3249 requires one more register file access than an `extrd'. */
3251 p = 63 - ls0;
3252 len = ls1 - ls0;
3254 operands[2] = GEN_INT (p);
3255 operands[3] = GEN_INT (len);
3256 return "depdi 0,%2,%3,%0";
3259 else
3260 return "and %1,%2,%0";
3263 const char *
3264 pa_output_ior (rtx *operands)
3266 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3267 int bs0, bs1, p, len;
3269 if (INTVAL (operands[2]) == 0)
3270 return "copy %1,%0";
3272 for (bs0 = 0; bs0 < 32; bs0++)
3273 if ((mask & (1 << bs0)) != 0)
3274 break;
3276 for (bs1 = bs0; bs1 < 32; bs1++)
3277 if ((mask & (1 << bs1)) == 0)
3278 break;
3280 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3282 p = 31 - bs0;
3283 len = bs1 - bs0;
3285 operands[2] = GEN_INT (p);
3286 operands[3] = GEN_INT (len);
3287 return "{depi|depwi} -1,%2,%3,%0";
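/* Example (illustrative): mask 0x00000ff0 gives bs0 = 4 and bs1 = 12,
   so bits 4..11 are set with "depi -1,27,8,%0" (position 31 - 4,
   length 12 - 4).  */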
3290 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3291 storing the result in operands[0]. */
3292 const char *
3293 pa_output_64bit_ior (rtx *operands)
3295 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3296 int bs0, bs1, p, len;
3298 if (INTVAL (operands[2]) == 0)
3299 return "copy %1,%0";
3301 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3302 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3303 break;
3305 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3306 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3307 break;
3309 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3310 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3312 p = 63 - bs0;
3313 len = bs1 - bs0;
3315 operands[2] = GEN_INT (p);
3316 operands[3] = GEN_INT (len);
3317 return "depdi -1,%2,%3,%0";
3320 /* Target hook for assembling integer objects. This code handles
3321 aligned SI and DI integers specially since function references
3322 must be preceded by P%. */
3324 static bool
3325 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3327 bool result;
3328 tree decl = NULL;
3330 /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to
3331 call assemble_external and set the SYMBOL_REF_DECL to NULL before
3332 calling output_addr_const. Otherwise, it may call assemble_external
3333 in the midst of outputting the assembler code for the SYMBOL_REF.
3334 We restore the SYMBOL_REF_DECL after the output is done. */
3335 if (GET_CODE (x) == SYMBOL_REF)
3337 decl = SYMBOL_REF_DECL (x);
3338 if (decl)
3340 assemble_external (decl);
3341 SET_SYMBOL_REF_DECL (x, NULL);
3345 if (size == UNITS_PER_WORD
3346 && aligned_p
3347 && function_label_operand (x, VOIDmode))
3349 fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);
3351 /* We don't want an OPD when generating fast indirect calls. */
3352 if (!TARGET_FAST_INDIRECT_CALLS)
3353 fputs ("P%", asm_out_file);
3355 output_addr_const (asm_out_file, x);
3356 fputc ('\n', asm_out_file);
3357 result = true;
3359 else
3360 result = default_assemble_integer (x, size, aligned_p);
3362 if (decl)
3363 SET_SYMBOL_REF_DECL (x, decl);
3365 return result;
3368 /* Output an ascii string. */
3369 void
3370 pa_output_ascii (FILE *file, const char *p, int size)
3372 int i;
3373 int chars_output;
3374 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3376 /* The HP assembler can only take strings of 256 characters at one
3377 time. This is a limitation on input line length, *not* the
3378 length of the string. Sigh. Even worse, it seems that the
3379 restriction is in number of input characters (see \xnn &
3380 \whatever). So we have to do this very carefully. */
3382 fputs ("\t.STRING \"", file);
3384 chars_output = 0;
3385 for (i = 0; i < size; i += 4)
3387 int co = 0;
3388 int io = 0;
3389 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3391 register unsigned int c = (unsigned char) p[i + io];
3393 if (c == '\"' || c == '\\')
3394 partial_output[co++] = '\\';
3395 if (c >= ' ' && c < 0177)
3396 partial_output[co++] = c;
3397 else
3399 unsigned int hexd;
3400 partial_output[co++] = '\\';
3401 partial_output[co++] = 'x';
3402 hexd = c / 16 + '0';
3403 if (hexd > '9')
3404 hexd -= '9' - 'a' + 1;
3405 partial_output[co++] = hexd;
3406 hexd = c % 16 + '0';
3407 if (hexd > '9')
3408 hexd -= '9' - 'a' + 1;
3409 partial_output[co++] = hexd;
3412 if (chars_output + co > 243)
3414 fputs ("\"\n\t.STRING \"", file);
3415 chars_output = 0;
3417 fwrite (partial_output, 1, (size_t) co, file);
3418 chars_output += co;
3419 co = 0;
3421 fputs ("\"\n", file);
3424 /* Try to rewrite floating point comparisons & branches to avoid
3425 useless add,tr insns.
3427 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3428 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3429 first attempt to remove useless add,tr insns. It is zero
3430 for the second pass as reorg sometimes leaves bogus REG_DEAD
3431 notes lying around.
3433 When CHECK_NOTES is zero we can only eliminate add,tr insns
3434 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3435 instructions. */
3436 static void
3437 remove_useless_addtr_insns (int check_notes)
3439 rtx_insn *insn;
3440 static int pass = 0;
3442 /* This is fairly cheap, so always run it when optimizing. */
3443 if (optimize > 0)
3445 int fcmp_count = 0;
3446 int fbranch_count = 0;
3448 /* Walk all the insns in this function looking for fcmp & fbranch
3449 instructions. Keep track of how many of each we find. */
3450 for (insn = get_insns (); insn; insn = next_insn (insn))
3452 rtx tmp;
3454 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3455 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3456 continue;
3458 tmp = PATTERN (insn);
3460 /* It must be a set. */
3461 if (GET_CODE (tmp) != SET)
3462 continue;
3464 /* If the destination is CCFP, then we've found an fcmp insn. */
3465 tmp = SET_DEST (tmp);
3466 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3468 fcmp_count++;
3469 continue;
3472 tmp = PATTERN (insn);
3473 /* If this is an fbranch instruction, bump the fbranch counter. */
3474 if (GET_CODE (tmp) == SET
3475 && SET_DEST (tmp) == pc_rtx
3476 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3477 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3478 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3479 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3481 fbranch_count++;
3482 continue;
3487 /* Find all floating point compare + branch insns. If possible,
3488 reverse the comparison & the branch to avoid add,tr insns. */
3489 for (insn = get_insns (); insn; insn = next_insn (insn))
3491 rtx tmp;
3492 rtx_insn *next;
3494 /* Ignore anything that isn't an INSN. */
3495 if (! NONJUMP_INSN_P (insn))
3496 continue;
3498 tmp = PATTERN (insn);
3500 /* It must be a set. */
3501 if (GET_CODE (tmp) != SET)
3502 continue;
3504 /* The destination must be CCFP, which is register zero. */
3505 tmp = SET_DEST (tmp);
3506 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3507 continue;
3509 /* INSN should be a set of CCFP.
3511 See if the result of this insn is used in a reversed FP
3512 conditional branch. If so, reverse our condition and
3513 the branch. Doing so avoids useless add,tr insns. */
3514 next = next_insn (insn);
3515 while (next)
3517 /* Jumps, calls and labels stop our search. */
3518 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3519 break;
3521 /* As does another fcmp insn. */
3522 if (NONJUMP_INSN_P (next)
3523 && GET_CODE (PATTERN (next)) == SET
3524 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3525 && REGNO (SET_DEST (PATTERN (next))) == 0)
3526 break;
3528 next = next_insn (next);
3531 /* Is NEXT_INSN a branch? */
3532 if (next && JUMP_P (next))
3534 rtx pattern = PATTERN (next);
3536 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3537 and CCFP dies, then reverse our conditional and the branch
3538 to avoid the add,tr. */
3539 if (GET_CODE (pattern) == SET
3540 && SET_DEST (pattern) == pc_rtx
3541 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3542 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3543 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3544 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3545 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3546 && (fcmp_count == fbranch_count
3547 || (check_notes
3548 && find_regno_note (next, REG_DEAD, 0))))
3550 /* Reverse the branch. */
3551 tmp = XEXP (SET_SRC (pattern), 1);
3552 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3553 XEXP (SET_SRC (pattern), 2) = tmp;
3554 INSN_CODE (next) = -1;
3556 /* Reverse our condition. */
3557 tmp = PATTERN (insn);
3558 PUT_CODE (XEXP (tmp, 1),
3559 (reverse_condition_maybe_unordered
3560 (GET_CODE (XEXP (tmp, 1)))));
3566 pass = !pass;
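/* Sketch of the rewrite above (illustrative): when an fcmp's only
   consumer is a branch on the *reversed* FP condition, the generated
   code needs an add,tr to implement the fall-through arm.  Swapping
   the branch arms and replacing the compare's code via
   reverse_condition_maybe_unordered lets the branch test the
   condition directly, and the add,tr disappears.  */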
3570 /* You may have trouble believing this, but this is the 32 bit HP-PA
3571 stack layout. Wow.
3573 Offset Contents
3575 Variable arguments (optional; any number may be allocated)
3577 SP-(4*(N+9)) arg word N
3579 SP-56 arg word 5
3580 SP-52 arg word 4
3582 Fixed arguments (must be allocated; may remain unused)
3584 SP-48 arg word 3
3585 SP-44 arg word 2
3586 SP-40 arg word 1
3587 SP-36 arg word 0
3589 Frame Marker
3591 SP-32 External Data Pointer (DP)
3592 SP-28 External sr4
3593 SP-24 External/stub RP (RP')
3594 SP-20 Current RP
3595 SP-16 Static Link
3596 SP-12 Clean up
3597 SP-8 Calling Stub RP (RP'')
3598 SP-4 Previous SP
3600 Top of Frame
3602 SP-0 Stack Pointer (points to next available address)
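/* Sanity check (illustrative): arg word N lives at SP-(4*(N+9)), so
   arg word 5 is at SP-(4*14) = SP-56 and arg word 0 at SP-(4*9) =
   SP-36, matching the table above.  */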
3606 /* This function saves registers as follows. Registers marked with ' are
3607 this function's registers (as opposed to the previous function's).
3608 If a frame_pointer isn't needed, r4 is saved as a general register;
3609 the space for the frame pointer is still allocated, though, to keep
3610 things simple.
3613 Top of Frame
3615 SP (FP') Previous FP
3616 SP + 4 Alignment filler (sigh)
3617 SP + 8 Space for locals reserved here.
3621 SP + n All call saved registers used.
3625 SP + o All call saved fp registers used.
3629 SP + p (SP') points to next available address.
3633 /* Global variables set by output_function_prologue(). */
3634 /* Size of frame. Need to know this to emit return insns from
3635 leaf procedures. */
3636 static HOST_WIDE_INT actual_fsize, local_fsize;
3637 static int save_fregs;
3639 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3640 Handle case where DISP > 8k by using the add_high_const patterns.
3642 Note that in the DISP > 8k case, we will leave the high part of the
3643 address in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3645 static void
3646 store_reg (int reg, HOST_WIDE_INT disp, int base)
3648 rtx dest, src, basereg;
3649 rtx_insn *insn;
3651 src = gen_rtx_REG (word_mode, reg);
3652 basereg = gen_rtx_REG (Pmode, base);
3653 if (VAL_14_BITS_P (disp))
3655 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3656 insn = emit_move_insn (dest, src);
3658 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3660 rtx delta = GEN_INT (disp);
3661 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3663 emit_move_insn (tmpreg, delta);
3664 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3665 if (DO_FRAME_NOTES)
3667 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3668 gen_rtx_SET (tmpreg,
3669 gen_rtx_PLUS (Pmode, basereg, delta)));
3670 RTX_FRAME_RELATED_P (insn) = 1;
3672 dest = gen_rtx_MEM (word_mode, tmpreg);
3673 insn = emit_move_insn (dest, src);
3675 else
3677 rtx delta = GEN_INT (disp);
3678 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3679 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3681 emit_move_insn (tmpreg, high);
3682 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3683 insn = emit_move_insn (dest, src);
3684 if (DO_FRAME_NOTES)
3685 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3686 gen_rtx_SET (gen_rtx_MEM (word_mode,
3687 gen_rtx_PLUS (word_mode,
3688 basereg,
3689 delta)),
3690 src));
3693 if (DO_FRAME_NOTES)
3694 RTX_FRAME_RELATED_P (insn) = 1;
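/* The displacement strategy above, by example (illustrative): a DISP
   of 100 emits a single "stw %src,100(%base)"; a 32-bit DISP that does
   not fit in 14 bits emits roughly "addil L'disp,%base" (high part
   into %r1) followed by "stw %src,R'disp(%r1)"; a DISP that does not
   even fit in 32 bits (TARGET_64BIT) is first materialized in %r1 and
   added to the base before a plain register-indirect store.  */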
3697 /* Emit RTL to store REG at the memory location specified by BASE and then
3698 add MOD to BASE. MOD must be <= 8k. */
3700 static void
3701 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3703 rtx basereg, srcreg, delta;
3704 rtx_insn *insn;
3706 gcc_assert (VAL_14_BITS_P (mod));
3708 basereg = gen_rtx_REG (Pmode, base);
3709 srcreg = gen_rtx_REG (word_mode, reg);
3710 delta = GEN_INT (mod);
3712 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3713 if (DO_FRAME_NOTES)
3715 RTX_FRAME_RELATED_P (insn) = 1;
3717 /* RTX_FRAME_RELATED_P must be set on each frame related set
3718 in a parallel with more than one element. */
3719 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3720 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3724 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3725 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3726 whether to add a frame note or not.
3728 In the DISP > 8k case, we leave the high part of the address in %r1.
3729 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3731 static void
3732 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3734 rtx_insn *insn;
3736 if (VAL_14_BITS_P (disp))
3738 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3739 plus_constant (Pmode,
3740 gen_rtx_REG (Pmode, base), disp));
3742 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3744 rtx basereg = gen_rtx_REG (Pmode, base);
3745 rtx delta = GEN_INT (disp);
3746 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3748 emit_move_insn (tmpreg, delta);
3749 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3750 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3751 if (DO_FRAME_NOTES)
3752 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3753 gen_rtx_SET (tmpreg,
3754 gen_rtx_PLUS (Pmode, basereg, delta)));
3756 else
3758 rtx basereg = gen_rtx_REG (Pmode, base);
3759 rtx delta = GEN_INT (disp);
3760 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3762 emit_move_insn (tmpreg,
3763 gen_rtx_PLUS (Pmode, basereg,
3764 gen_rtx_HIGH (Pmode, delta)));
3765 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3766 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3769 if (DO_FRAME_NOTES && note)
3770 RTX_FRAME_RELATED_P (insn) = 1;
3773 HOST_WIDE_INT
3774 pa_compute_frame_size (poly_int64 size, int *fregs_live)
3776 int freg_saved = 0;
3777 int i, j;
3779 /* The code in pa_expand_prologue and pa_expand_epilogue must
3780 be consistent with the rounding and size calculation done here.
3781 Change them at the same time. */
3783 /* We do our own stack alignment. First, round the size of the
3784 stack locals up to a word boundary. */
3785 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3787 /* Space for previous frame pointer + filler. If any frame is
3788 allocated, we need to add in the TARGET_STARTING_FRAME_OFFSET. We
3789 waste some space here for the sake of HP compatibility. The
3790 first slot is only used when the frame pointer is needed. */
3791 if (size || frame_pointer_needed)
3792 size += pa_starting_frame_offset ();
3794 /* If the current function calls __builtin_eh_return, then we need
3795 to allocate stack space for registers that will hold data for
3796 the exception handler. */
3797 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3799 unsigned int i;
3801 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3802 continue;
3803 size += i * UNITS_PER_WORD;
3806 /* Account for space used by the callee general register saves. */
3807 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3808 if (df_regs_ever_live_p (i))
3809 size += UNITS_PER_WORD;
3811 /* Account for space used by the callee floating point register saves. */
3812 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3813 if (df_regs_ever_live_p (i)
3814 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3816 freg_saved = 1;
3818 /* We always save both halves of the FP register, so always
3819 increment the frame size by 8 bytes. */
3820 size += 8;
3823 /* If any of the floating registers are saved, account for the
3824 alignment needed for the floating point register save block. */
3825 if (freg_saved)
3827 size = (size + 7) & ~7;
3828 if (fregs_live)
3829 *fregs_live = 1;
3832 /* The various ABIs include space for the outgoing parameters in the
3833 size of the current function's stack frame. We don't need to align
3834 for the outgoing arguments as their alignment is set by the final
3835 rounding for the frame as a whole. */
3836 size += crtl->outgoing_args_size;
3838 /* Allocate space for the fixed frame marker. This space must be
3839 allocated for any function that makes calls or allocates
3840 stack space. */
3841 if (!crtl->is_leaf || size)
3842 size += TARGET_64BIT ? 48 : 32;
3844 /* Finally, round to the preferred stack boundary. */
3845 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3846 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
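/* A worked example of pa_compute_frame_size above, for the 32-bit ABI
   (UNITS_PER_WORD == 4, 32-byte frame marker, 64-byte preferred stack
   boundary; numbers illustrative): 13 bytes of locals round up to 16;
   a non-leaf function then adds the frame marker, 16 + 32 == 48; and
   the final rounding (48 + 63) & ~63 == 64 gives the allocated frame
   size, ignoring the starting frame offset and any register saves.  */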
3849 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3850 of memory. If any fpu reg is used in the function, we allocate
3851 such a block here, at the bottom of the frame, just in case it's needed.
3853 If this function is a leaf procedure, then we may choose not
3854 to do a "save" insn. The decision about whether or not
3855 to do this is made in regclass.c. */
3857 static void
3858 pa_output_function_prologue (FILE *file)
3860 /* The function's label and associated .PROC must never be
3861 separated and must be output *after* any profiling declarations
3862 to avoid changing spaces/subspaces within a procedure. */
3863 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3864 fputs ("\t.PROC\n", file);
3866 /* pa_expand_prologue does the dirty work now. We just need
3867 to output the assembler directives which denote the start
3868 of a function. */
3869 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3870 if (crtl->is_leaf)
3871 fputs (",NO_CALLS", file);
3872 else
3873 fputs (",CALLS", file);
3874 if (rp_saved)
3875 fputs (",SAVE_RP", file);
3877 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3878 at the beginning of the frame and that it is used as the frame
3879 pointer for the frame. We do this because our current frame
3880 layout doesn't conform to that specified in the HP runtime
3881 documentation and we need a way to indicate to programs such as
3882 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3883 isn't used by HP compilers but is supported by the assembler.
3884 However, SAVE_SP is supposed to indicate that the previous stack
3885 pointer has been saved in the frame marker. */
3886 if (frame_pointer_needed)
3887 fputs (",SAVE_SP", file);
3889 /* Pass on information about the number of callee register saves
3890 performed in the prologue.
3892 The compiler is supposed to pass the highest register number
3893 saved, the assembler then has to adjust that number before
3894 entering it into the unwind descriptor (to account for any
3895 caller saved registers with lower register numbers than the
3896 first callee saved register). */
3897 if (gr_saved)
3898 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3900 if (fr_saved)
3901 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3903 fputs ("\n\t.ENTRY\n", file);
3905 remove_useless_addtr_insns (0);
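/* Taken together, the directives emitted by pa_output_function_prologue
   above give a preamble of roughly this shape for a small non-leaf
   function (values illustrative):

       foo:
           .PROC
           .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=3
           .ENTRY

   The assembler folds these annotations into the function's unwind
   descriptor.  */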
3908 void
3909 pa_expand_prologue (void)
3911 int merge_sp_adjust_with_store = 0;
3912 HOST_WIDE_INT size = get_frame_size ();
3913 HOST_WIDE_INT offset;
3914 int i;
3915 rtx tmpreg;
3916 rtx_insn *insn;
3918 gr_saved = 0;
3919 fr_saved = 0;
3920 save_fregs = 0;
3922 /* Compute total size for frame pointer, filler, locals and rounding to
3923 the next word boundary. Similar code appears in pa_compute_frame_size
3924 and must be changed in tandem with this code. */
3925 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3926 if (local_fsize || frame_pointer_needed)
3927 local_fsize += pa_starting_frame_offset ();
3929 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3930 if (flag_stack_usage_info)
3931 current_function_static_stack_size = actual_fsize;
3933 /* Compute a few things we will use often. */
3934 tmpreg = gen_rtx_REG (word_mode, 1);
3936 /* Save RP first. The calling conventions manual states RP will
3937 always be stored into the caller's frame at sp - 20 or sp - 16
3938 depending on which ABI is in use. */
3939 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3941 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3942 rp_saved = true;
3944 else
3945 rp_saved = false;
3947 /* Allocate the local frame and set up the frame pointer if needed. */
3948 if (actual_fsize != 0)
3950 if (frame_pointer_needed)
3952 /* Copy the old frame pointer temporarily into %r1. Set up the
3953 new stack pointer, then store away the saved old frame pointer
3954 into the stack at sp and at the same time update the stack
3955 pointer by actual_fsize bytes.  There are two versions: the first
3956 handles small (<8k) frames, the second handles large (>=8k)
3957 frames.  */
3958 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3959 if (DO_FRAME_NOTES)
3960 RTX_FRAME_RELATED_P (insn) = 1;
3962 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3963 if (DO_FRAME_NOTES)
3964 RTX_FRAME_RELATED_P (insn) = 1;
3966 if (VAL_14_BITS_P (actual_fsize))
3967 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3968 else
3970 /* It is incorrect to store the saved frame pointer at *sp,
3971 then increment sp (writes beyond the current stack boundary).
3973 So instead use stwm to store at *sp and post-increment the
3974 stack pointer as an atomic operation. Then increment sp to
3975 finish allocating the new frame. */
3976 HOST_WIDE_INT adjust1 = 8192 - 64;
3977 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3979 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3980 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3981 adjust2, 1);
3984 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3985 we need to store the previous stack pointer (frame pointer)
3986 into the frame marker on targets that use the HP unwind
3987 library. This allows the HP unwind library to be used to
3988 unwind GCC frames. However, we are not fully compatible
3989 with the HP library because our frame layout differs from
3990 that specified in the HP runtime specification.
3992 We don't want a frame note on this instruction as the frame
3993 marker moves during dynamic stack allocation.
3995 This instruction also serves as a blockage to prevent
3996 register spills from being scheduled before the stack
3997 pointer is raised. This is necessary as we store
3998 registers using the frame pointer as a base register,
3999 and the frame pointer is set before sp is raised. */
4000 if (TARGET_HPUX_UNWIND_LIBRARY)
4002 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
4003 GEN_INT (TARGET_64BIT ? -8 : -4));
4005 emit_move_insn (gen_rtx_MEM (word_mode, addr),
4006 hard_frame_pointer_rtx);
4008 else
4009 emit_insn (gen_blockage ());
4011 /* No frame pointer needed.  */
4012 else
4014 /* In some cases we can perform the first callee register save
4015 and allocating the stack frame at the same time. If so, just
4016 make a note of it and defer allocating the frame until saving
4017 the callee registers. */
4018 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
4019 merge_sp_adjust_with_store = 1;
4020 /* Cannot optimize.  Adjust the stack frame by actual_fsize
4021 bytes. */
4022 else
4023 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4024 actual_fsize, 1);
4028 /* Normal register save.
4030 Do not save the frame pointer in the frame_pointer_needed case. It
4031 was done earlier. */
4032 if (frame_pointer_needed)
4034 offset = local_fsize;
4036 /* Saving the EH return data registers in the frame is the simplest
4037 way to get the frame unwind information emitted. We put them
4038 just before the general registers. */
4039 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4041 unsigned int i, regno;
4043 for (i = 0; ; ++i)
4045 regno = EH_RETURN_DATA_REGNO (i);
4046 if (regno == INVALID_REGNUM)
4047 break;
4049 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4050 offset += UNITS_PER_WORD;
4054 for (i = 18; i >= 4; i--)
4055 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4057 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4058 offset += UNITS_PER_WORD;
4059 gr_saved++;
4061 /* Account for %r3 which is saved in a special place. */
4062 gr_saved++;
4064 /* No frame pointer needed. */
4065 else
4067 offset = local_fsize - actual_fsize;
4069 /* Saving the EH return data registers in the frame is the simplest
4070 way to get the frame unwind information emitted. */
4071 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4073 unsigned int i, regno;
4075 for (i = 0; ; ++i)
4077 regno = EH_RETURN_DATA_REGNO (i);
4078 if (regno == INVALID_REGNUM)
4079 break;
4081 /* If merge_sp_adjust_with_store is nonzero, then we can
4082 optimize the first save. */
4083 if (merge_sp_adjust_with_store)
4085 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4086 merge_sp_adjust_with_store = 0;
4088 else
4089 store_reg (regno, offset, STACK_POINTER_REGNUM);
4090 offset += UNITS_PER_WORD;
4094 for (i = 18; i >= 3; i--)
4095 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4097 /* If merge_sp_adjust_with_store is nonzero, then we can
4098 optimize the first GR save. */
4099 if (merge_sp_adjust_with_store)
4101 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4102 merge_sp_adjust_with_store = 0;
4104 else
4105 store_reg (i, offset, STACK_POINTER_REGNUM);
4106 offset += UNITS_PER_WORD;
4107 gr_saved++;
4110 /* If we wanted to merge the SP adjustment with a GR save, but we never
4111 did any GR saves, then just emit the adjustment here. */
4112 if (merge_sp_adjust_with_store)
4113 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4114 actual_fsize, 1);
4117 /* The hppa calling conventions say that %r19, the pic offset
4118 register, is saved at sp - 32 (in this function's frame)
4119 when generating PIC code. FIXME: What is the correct thing
4120 to do for functions which make no calls and allocate no
4121 frame? Do we need to allocate a frame, or can we just omit
4122 the save? For now we'll just omit the save.
4124 We don't want a note on this insn as the frame marker can
4125 move if there is a dynamic stack allocation. */
4126 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4128 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4130 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4134 /* Align pointer properly (doubleword boundary). */
4135 offset = (offset + 7) & ~7;
4137 /* Floating point register store. */
4138 if (save_fregs)
4140 rtx base;
4142 /* First get the frame or stack pointer to the start of the FP register
4143 save area. */
4144 if (frame_pointer_needed)
4146 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4147 base = hard_frame_pointer_rtx;
4149 else
4151 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4152 base = stack_pointer_rtx;
4155 /* Now actually save the FP registers. */
4156 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4158 if (df_regs_ever_live_p (i)
4159 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4161 rtx addr, reg;
4162 rtx_insn *insn;
4163 addr = gen_rtx_MEM (DFmode,
4164 gen_rtx_POST_INC (word_mode, tmpreg));
4165 reg = gen_rtx_REG (DFmode, i);
4166 insn = emit_move_insn (addr, reg);
4167 if (DO_FRAME_NOTES)
4169 RTX_FRAME_RELATED_P (insn) = 1;
4170 if (TARGET_64BIT)
4172 rtx mem = gen_rtx_MEM (DFmode,
4173 plus_constant (Pmode, base,
4174 offset));
4175 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4176 gen_rtx_SET (mem, reg));
4178 else
4180 rtx meml = gen_rtx_MEM (SFmode,
4181 plus_constant (Pmode, base,
4182 offset));
4183 rtx memr = gen_rtx_MEM (SFmode,
4184 plus_constant (Pmode, base,
4185 offset + 4));
4186 rtx regl = gen_rtx_REG (SFmode, i);
4187 rtx regr = gen_rtx_REG (SFmode, i + 1);
4188 rtx setl = gen_rtx_SET (meml, regl);
4189 rtx setr = gen_rtx_SET (memr, regr);
4190 rtvec vec;
4192 RTX_FRAME_RELATED_P (setl) = 1;
4193 RTX_FRAME_RELATED_P (setr) = 1;
4194 vec = gen_rtvec (2, setl, setr);
4195 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4196 gen_rtx_SEQUENCE (VOIDmode, vec));
4199 offset += GET_MODE_SIZE (DFmode);
4200 fr_saved++;
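/* Note on the large-frame path in pa_expand_prologue above: the split
   constant 8192 - 64 == 8128 keeps the atomic stwm adjustment inside
   the signed 14-bit displacement range.  For example, with
   actual_fsize == 20000:

     adjust1 == 8128                     (stwm %r1,8128(%r30), illustrative)
     adjust2 == 20000 - 8128 == 11872    (added by set_reg_plus_d)

   Numbers are for illustration only.  */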
4206 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4207 Handle case where DISP > 8k by using the add_high_const patterns. */
4209 static void
4210 load_reg (int reg, HOST_WIDE_INT disp, int base)
4212 rtx dest = gen_rtx_REG (word_mode, reg);
4213 rtx basereg = gen_rtx_REG (Pmode, base);
4214 rtx src;
4216 if (VAL_14_BITS_P (disp))
4217 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4218 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4220 rtx delta = GEN_INT (disp);
4221 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4223 emit_move_insn (tmpreg, delta);
4224 if (TARGET_DISABLE_INDEXING)
4226 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4227 src = gen_rtx_MEM (word_mode, tmpreg);
4229 else
4230 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4232 else
4234 rtx delta = GEN_INT (disp);
4235 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4236 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4238 emit_move_insn (tmpreg, high);
4239 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4242 emit_move_insn (dest, src);
4245 /* Update the total code bytes output to the text section. */
4247 static void
4248 update_total_code_bytes (unsigned int nbytes)
4250 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4251 && !IN_NAMED_SECTION_P (cfun->decl))
4253 unsigned int old_total = total_code_bytes;
4255 total_code_bytes += nbytes;
4257 /* Be prepared to handle overflows. */
4258 if (old_total > total_code_bytes)
4259 total_code_bytes = UINT_MAX;
4263 /* This function generates the assembly code for function exit.
4264 Args are as for output_function_prologue ().
4266 The function epilogue should not depend on the current stack
4267 pointer! It should use the frame pointer only. This is mandatory
4268 because of alloca; we also take advantage of it to omit stack
4269 adjustments before returning. */
4271 static void
4272 pa_output_function_epilogue (FILE *file)
4274 rtx_insn *insn = get_last_insn ();
4275 bool extra_nop;
4277 /* pa_expand_epilogue does the dirty work now. We just need
4278 to output the assembler directives which denote the end
4279 of a function.
4281 To make debuggers happy, emit a nop if the epilogue was completely
4282 eliminated due to a volatile call as the last insn in the
4283 current function. That way the return address (in %r2) will
4284 always point to a valid instruction in the current function. */
4286 /* Get the last real insn. */
4287 if (NOTE_P (insn))
4288 insn = prev_real_insn (insn);
4290 /* If it is a sequence, then look inside. */
4291 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4292 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4294 /* If insn is a CALL_INSN, then it must be a call to a volatile
4295 function (otherwise there would be epilogue insns). */
4296 if (insn && CALL_P (insn))
4298 fputs ("\tnop\n", file);
4299 extra_nop = true;
4301 else
4302 extra_nop = false;
4304 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4306 if (TARGET_SOM && TARGET_GAS)
4308 /* We are done with this subspace except possibly for some additional
4309 debug information. Forget that we are in this subspace to ensure
4310 that the next function is output in its own subspace. */
4311 in_section = NULL;
4312 cfun->machine->in_nsubspa = 2;
4315 /* Thunks do their own insn accounting. */
4316 if (cfun->is_thunk)
4317 return;
4319 if (INSN_ADDRESSES_SET_P ())
4321 last_address = extra_nop ? 4 : 0;
4322 insn = get_last_nonnote_insn ();
4323 if (insn)
4325 last_address += INSN_ADDRESSES (INSN_UID (insn));
4326 if (INSN_P (insn))
4327 last_address += insn_default_length (insn);
4329 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4330 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4332 else
4333 last_address = UINT_MAX;
4335 /* Finally, update the total number of code bytes output so far. */
4336 update_total_code_bytes (last_address);
4339 void
4340 pa_expand_epilogue (void)
4342 rtx tmpreg;
4343 HOST_WIDE_INT offset;
4344 HOST_WIDE_INT ret_off = 0;
4345 int i;
4346 int merge_sp_adjust_with_load = 0;
4348 /* We will use this often. */
4349 tmpreg = gen_rtx_REG (word_mode, 1);
4351 /* Try to restore RP early to avoid load/use interlocks when
4352 RP gets used in the return (bv) instruction. This appears to still
4353 be necessary even when we schedule the prologue and epilogue. */
4354 if (rp_saved)
4356 ret_off = TARGET_64BIT ? -16 : -20;
4357 if (frame_pointer_needed)
4359 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4360 ret_off = 0;
4362 else
4364 /* No frame pointer, and stack is smaller than 8k. */
4365 if (VAL_14_BITS_P (ret_off - actual_fsize))
4367 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4368 ret_off = 0;
4373 /* General register restores. */
4374 if (frame_pointer_needed)
4376 offset = local_fsize;
4378 /* If the current function calls __builtin_eh_return, then we need
4379 to restore the saved EH data registers. */
4380 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4382 unsigned int i, regno;
4384 for (i = 0; ; ++i)
4386 regno = EH_RETURN_DATA_REGNO (i);
4387 if (regno == INVALID_REGNUM)
4388 break;
4390 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4391 offset += UNITS_PER_WORD;
4395 for (i = 18; i >= 4; i--)
4396 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4398 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4399 offset += UNITS_PER_WORD;
4402 else
4404 offset = local_fsize - actual_fsize;
4406 /* If the current function calls __builtin_eh_return, then we need
4407 to restore the saved EH data registers. */
4408 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4410 unsigned int i, regno;
4412 for (i = 0; ; ++i)
4414 regno = EH_RETURN_DATA_REGNO (i);
4415 if (regno == INVALID_REGNUM)
4416 break;
4418 /* Only for the first load.
4419 merge_sp_adjust_with_load holds the register load
4420 with which we will merge the sp adjustment. */
4421 if (merge_sp_adjust_with_load == 0
4422 && local_fsize == 0
4423 && VAL_14_BITS_P (-actual_fsize))
4424 merge_sp_adjust_with_load = regno;
4425 else
4426 load_reg (regno, offset, STACK_POINTER_REGNUM);
4427 offset += UNITS_PER_WORD;
4431 for (i = 18; i >= 3; i--)
4433 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4435 /* Only for the first load.
4436 merge_sp_adjust_with_load holds the register load
4437 with which we will merge the sp adjustment. */
4438 if (merge_sp_adjust_with_load == 0
4439 && local_fsize == 0
4440 && VAL_14_BITS_P (-actual_fsize))
4441 merge_sp_adjust_with_load = i;
4442 else
4443 load_reg (i, offset, STACK_POINTER_REGNUM);
4444 offset += UNITS_PER_WORD;
4449 /* Align pointer properly (doubleword boundary). */
4450 offset = (offset + 7) & ~7;
4452 /* FP register restores. */
4453 if (save_fregs)
4455 /* Adjust the register to index off of. */
4456 if (frame_pointer_needed)
4457 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4458 else
4459 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4461 /* Actually do the restores now. */
4462 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4463 if (df_regs_ever_live_p (i)
4464 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4466 rtx src = gen_rtx_MEM (DFmode,
4467 gen_rtx_POST_INC (word_mode, tmpreg));
4468 rtx dest = gen_rtx_REG (DFmode, i);
4469 emit_move_insn (dest, src);
4473 /* Emit a blockage insn here to keep these insns from being moved to
4474 an earlier spot in the epilogue, or into the main instruction stream.
4476 This is necessary as we must not cut the stack back before all the
4477 restores are finished. */
4478 emit_insn (gen_blockage ());
4480 /* Reset stack pointer (and possibly frame pointer). The stack
4481 pointer is initially set to fp + 64 to avoid a race condition. */
4482 if (frame_pointer_needed)
4484 rtx delta = GEN_INT (-64);
4486 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4487 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4488 stack_pointer_rtx, delta));
4490 /* If we were deferring a callee register restore, do it now. */
4491 else if (merge_sp_adjust_with_load)
4493 rtx delta = GEN_INT (-actual_fsize);
4494 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4496 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4498 else if (actual_fsize != 0)
4499 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4500 - actual_fsize, 0);
4502 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4503 frame greater than 8k), do so now. */
4504 if (ret_off != 0)
4505 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4507 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4509 rtx sa = EH_RETURN_STACKADJ_RTX;
4511 emit_insn (gen_blockage ());
4512 emit_insn (TARGET_64BIT
4513 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4514 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
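/* A sketch of the frame teardown above when a frame pointer is in use
   (mnemonics illustrative): the stack pointer is first placed 64 bytes
   above the frame pointer, then one pre-modify load cuts the stack
   back and restores the saved %r3:

     ldo 64(%r3),%r30
     ldw,mb -64(%r30),%r3

   so the stack pointer never drops below live frame data, which is the
   race the comment above refers to.  */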
4518 bool
4519 pa_can_use_return_insn (void)
4521 if (!reload_completed)
4522 return false;
4524 if (frame_pointer_needed)
4525 return false;
4527 if (df_regs_ever_live_p (2))
4528 return false;
4530 if (crtl->profile)
4531 return false;
4533 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4536 rtx
4537 hppa_pic_save_rtx (void)
4539 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4542 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4543 #define NO_DEFERRED_PROFILE_COUNTERS 0
4544 #endif
4547 /* Vector of funcdef numbers. */
4548 static vec<int> funcdef_nos;
4550 /* Output deferred profile counters. */
4551 static void
4552 output_deferred_profile_counters (void)
4554 unsigned int i;
4555 int align, n;
4557 if (funcdef_nos.is_empty ())
4558 return;
4560 switch_to_section (data_section);
4561 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4562 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4564 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4566 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4567 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4570 funcdef_nos.release ();
4573 void
4574 hppa_profile_hook (int label_no)
4576 /* We use SImode for the address of the function in both 32 and
4577 64-bit code to avoid having to provide DImode versions of the
4578 lcla2 and load_offset_label_address insn patterns. */
4579 rtx reg = gen_reg_rtx (SImode);
4580 rtx_code_label *label_rtx = gen_label_rtx ();
4581 rtx mcount = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "_mcount"));
4582 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4583 rtx arg_bytes, begin_label_rtx;
4584 rtx_insn *call_insn;
4585 char begin_label_name[16];
4586 bool use_mcount_pcrel_call;
4588 /* If we can reach _mcount with a pc-relative call, we can optimize
4589 loading the address of the current function. This requires linker
4590 long branch stub support. */
4591 if (!TARGET_PORTABLE_RUNTIME
4592 && !TARGET_LONG_CALLS
4593 && (TARGET_SOM || flag_function_sections))
4594 use_mcount_pcrel_call = TRUE;
4595 else
4596 use_mcount_pcrel_call = FALSE;
4598 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4599 label_no);
4600 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4602 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4604 if (!use_mcount_pcrel_call)
4606 /* The address of the function is loaded into %r25 with an instruction-
4607 relative sequence that avoids the use of relocations. The sequence
4608 is split so that the load_offset_label_address instruction can
4609 occupy the delay slot of the call to _mcount. */
4610 if (TARGET_PA_20)
4611 emit_insn (gen_lcla2 (reg, label_rtx));
4612 else
4613 emit_insn (gen_lcla1 (reg, label_rtx));
4615 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4616 reg,
4617 begin_label_rtx,
4618 label_rtx));
4621 if (!NO_DEFERRED_PROFILE_COUNTERS)
4623 rtx count_label_rtx, addr, r24;
4624 char count_label_name[16];
4626 funcdef_nos.safe_push (label_no);
4627 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4628 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4629 ggc_strdup (count_label_name));
4631 addr = force_reg (Pmode, count_label_rtx);
4632 r24 = gen_rtx_REG (Pmode, 24);
4633 emit_move_insn (r24, addr);
4635 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4636 if (use_mcount_pcrel_call)
4637 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4638 begin_label_rtx));
4639 else
4640 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4642 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4644 else
4646 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4647 if (use_mcount_pcrel_call)
4648 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4649 begin_label_rtx));
4650 else
4651 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4654 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4655 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4657 /* Indicate the _mcount call cannot throw, nor will it execute a
4658 non-local goto. */
4659 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4661 /* Allocate space for fixed arguments. */
4662 if (reg_parm_stack_space > crtl->outgoing_args_size)
4663 crtl->outgoing_args_size = reg_parm_stack_space;
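/* In summary, the register protocol this function sets up for the
   _mcount call is (as emitted above, not an external ABI reference):

     %r26  copy of %r2, the return pointer into the caller
     %r25  address of the instrumented function's prologue label
           (loaded explicitly when a pc-relative call is unusable)
     %r24  address of the profile counter, when counters are used  */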
4666 /* Fetch the return address for the frame COUNT steps up from
4667 the current frame, after the prologue. FRAMEADDR is the
4668 frame pointer of the COUNT frame.
4670 We want to ignore any export stub remnants here. To handle this,
4671 we examine the code at the return address, and if it is an export
4672 stub, we return a memory rtx for the stub return address stored
4673 at frame-24.
4675 The value returned is used in two different ways:
4677 1. To find a function's caller.
4679 2. To change the return address for a function.
4681 This function handles most instances of case 1; however, it will
4682 fail if there are two levels of stubs to execute on the return
4683 path. The only way I believe that can happen is if the return value
4684 needs a parameter relocation, which never happens for C code.
4686 This function handles most instances of case 2; however, it will
4687 fail if we did not originally have stub code on the return path
4688 but will need stub code on the new return path. This can happen if
4689 the caller & callee are both in the main program, but the new
4690 return location is in a shared library. */
4692 rtx
4693 pa_return_addr_rtx (int count, rtx frameaddr)
4695 rtx label;
4696 rtx rp;
4697 rtx saved_rp;
4698 rtx ins;
4700 /* The instruction stream at the return address of a PA1.X export stub is:
4702 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4703 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4704 0x00011820 | stub+16: mtsp r1,sr0
4705 0xe0400002 | stub+20: be,n 0(sr0,rp)
4707 0xe0400002 must be specified as -532676606 so that it won't be
4708 rejected as an invalid immediate operand on 64-bit hosts.
4710 The instruction stream at the return address of a PA2.0 export stub is:
4712 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4713 0xe840d002 | stub+12: bve,n (rp)
4716 HOST_WIDE_INT insns[4];
4717 int i, len;
4719 if (count != 0)
4720 return NULL_RTX;
4722 rp = get_hard_reg_initial_val (Pmode, 2);
4724 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4725 return rp;
4727 /* If there is no export stub then just use the value saved from
4728 the return pointer register. */
4730 saved_rp = gen_reg_rtx (Pmode);
4731 emit_move_insn (saved_rp, rp);
4733 /* Get pointer to the instruction stream. We have to mask out the
4734 privilege level from the two low order bits of the return address
4735 pointer here so that ins will point to the start of the first
4736 instruction that would have been executed if we returned. */
4737 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4738 label = gen_label_rtx ();
4740 if (TARGET_PA_20)
4742 insns[0] = 0x4bc23fd1;
4743 insns[1] = -398405630;
4744 len = 2;
4746 else
4748 insns[0] = 0x4bc23fd1;
4749 insns[1] = 0x004010a1;
4750 insns[2] = 0x00011820;
4751 insns[3] = -532676606;
4752 len = 4;
4755 /* Check the instruction stream at the normal return address for the
4756 export stub.  If it is an export stub, then our return address is
4757 really in -24[frameaddr]. */
4759 for (i = 0; i < len; i++)
4761 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4762 rtx op1 = GEN_INT (insns[i]);
4763 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4766 /* Here we know that our return address points to an export
4767 stub. We don't want to return the address of the export stub,
4768 but rather the return address of the export stub. That return
4769 address is stored at -24[frameaddr]. */
4771 emit_move_insn (saved_rp,
4772 gen_rtx_MEM (Pmode,
4773 memory_address (Pmode,
4774 plus_constant (Pmode, frameaddr,
4775 -24))));
4777 emit_label (label);
4779 return saved_rp;
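/* The negative constants above are just the stub words reinterpreted
   as signed 32-bit values, e.g.

     0xe840d002 - 0x100000000 == -398405630   (bve,n (rp))
     0xe0400002 - 0x100000000 == -532676606   (be,n 0(sr0,rp))

   so they are accepted as immediates on 64-bit hosts.  */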
4782 void
4783 pa_emit_bcond_fp (rtx operands[])
4785 enum rtx_code code = GET_CODE (operands[0]);
4786 rtx operand0 = operands[1];
4787 rtx operand1 = operands[2];
4788 rtx label = operands[3];
4790 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4791 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4793 emit_jump_insn (gen_rtx_SET (pc_rtx,
4794 gen_rtx_IF_THEN_ELSE (VOIDmode,
4795 gen_rtx_fmt_ee (NE,
4796 VOIDmode,
4797 gen_rtx_REG (CCFPmode, 0),
4798 const0_rtx),
4799 gen_rtx_LABEL_REF (VOIDmode, label),
4800 pc_rtx)));
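/* The RTL generated above has this shape (pseudo-RTL, shown for a LT
   comparison):

     (set (reg:CCFP 0) (lt:CCFP (reg op0) (reg op1)))
     (set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
                             (label_ref label)
                             (pc)))  */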
4804 /* Adjust the cost of a scheduling dependency. Return the new cost of
4805 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4807 static int
4808 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4809 unsigned int)
4811 enum attr_type attr_type;
4813 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4814 true dependencies as they are described with bypasses now. */
4815 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4816 return cost;
4818 if (! recog_memoized (insn))
4819 return 0;
4821 attr_type = get_attr_type (insn);
4823 switch (dep_type)
4825 case REG_DEP_ANTI:
4826 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4827 cycles later. */
4829 if (attr_type == TYPE_FPLOAD)
4831 rtx pat = PATTERN (insn);
4832 rtx dep_pat = PATTERN (dep_insn);
4833 if (GET_CODE (pat) == PARALLEL)
4835 /* This happens for the fldXs,mb patterns. */
4836 pat = XVECEXP (pat, 0, 0);
4838 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4839 /* If this happens, we have to extend this to schedule
4840 optimally. Return 0 for now. */
4841 return 0;
4843 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4845 if (! recog_memoized (dep_insn))
4846 return 0;
4847 switch (get_attr_type (dep_insn))
4849 case TYPE_FPALU:
4850 case TYPE_FPMULSGL:
4851 case TYPE_FPMULDBL:
4852 case TYPE_FPDIVSGL:
4853 case TYPE_FPDIVDBL:
4854 case TYPE_FPSQRTSGL:
4855 case TYPE_FPSQRTDBL:
4856 /* A fpload can't be issued until one cycle before a
4857 preceding arithmetic operation has finished if
4858 the target of the fpload is any of the sources
4859 (or destination) of the arithmetic operation. */
4860 return insn_default_latency (dep_insn) - 1;
4862 default:
4863 return 0;
4867 else if (attr_type == TYPE_FPALU)
4869 rtx pat = PATTERN (insn);
4870 rtx dep_pat = PATTERN (dep_insn);
4871 if (GET_CODE (pat) == PARALLEL)
4873 /* This happens for the fldXs,mb patterns. */
4874 pat = XVECEXP (pat, 0, 0);
4876 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4877 /* If this happens, we have to extend this to schedule
4878 optimally. Return 0 for now. */
4879 return 0;
4881 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4883 if (! recog_memoized (dep_insn))
4884 return 0;
4885 switch (get_attr_type (dep_insn))
4887 case TYPE_FPDIVSGL:
4888 case TYPE_FPDIVDBL:
4889 case TYPE_FPSQRTSGL:
4890 case TYPE_FPSQRTDBL:
4891 /* An ALU flop can't be issued until two cycles before a
4892 preceding divide or sqrt operation has finished if
4893 the target of the ALU flop is any of the sources
4894 (or destination) of the divide or sqrt operation. */
4895 return insn_default_latency (dep_insn) - 2;
4897 default:
4898 return 0;
4903 /* For other anti dependencies, the cost is 0. */
4904 return 0;
4906 case REG_DEP_OUTPUT:
4907 /* Output dependency; DEP_INSN writes a register that INSN writes some
4908 cycles later. */
4909 if (attr_type == TYPE_FPLOAD)
4911 rtx pat = PATTERN (insn);
4912 rtx dep_pat = PATTERN (dep_insn);
4913 if (GET_CODE (pat) == PARALLEL)
4915 /* This happens for the fldXs,mb patterns. */
4916 pat = XVECEXP (pat, 0, 0);
4918 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4919 /* If this happens, we have to extend this to schedule
4920 optimally. Return 0 for now. */
4921 return 0;
4923 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4925 if (! recog_memoized (dep_insn))
4926 return 0;
4927 switch (get_attr_type (dep_insn))
4929 case TYPE_FPALU:
4930 case TYPE_FPMULSGL:
4931 case TYPE_FPMULDBL:
4932 case TYPE_FPDIVSGL:
4933 case TYPE_FPDIVDBL:
4934 case TYPE_FPSQRTSGL:
4935 case TYPE_FPSQRTDBL:
4936 /* A fpload can't be issued until one cycle before a
4937 preceding arithmetic operation has finished if
4938 the target of the fpload is the destination of the
4939 arithmetic operation.
4941 Exception: For PA7100LC, PA7200 and PA7300, the cost
4942 is 3 cycles, unless they bundle together. We also
4943 pay the penalty if the second insn is a fpload. */
4944 return insn_default_latency (dep_insn) - 1;
4946 default:
4947 return 0;
4951 else if (attr_type == TYPE_FPALU)
4953 rtx pat = PATTERN (insn);
4954 rtx dep_pat = PATTERN (dep_insn);
4955 if (GET_CODE (pat) == PARALLEL)
4957 /* This happens for the fldXs,mb patterns. */
4958 pat = XVECEXP (pat, 0, 0);
4960 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4961 /* If this happens, we have to extend this to schedule
4962 optimally. Return 0 for now. */
4963 return 0;
4965 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4967 if (! recog_memoized (dep_insn))
4968 return 0;
4969 switch (get_attr_type (dep_insn))
4971 case TYPE_FPDIVSGL:
4972 case TYPE_FPDIVDBL:
4973 case TYPE_FPSQRTSGL:
4974 case TYPE_FPSQRTDBL:
4975 /* An ALU flop can't be issued until two cycles before a
4976 preceding divide or sqrt operation has finished if
4977 the target of the ALU flop is also the target of
4978 the divide or sqrt operation. */
4979 return insn_default_latency (dep_insn) - 2;
4981 default:
4982 return 0;
4987 /* For other output dependencies, the cost is 0. */
4988 return 0;
4990 default:
4991 gcc_unreachable ();
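/* Example of the adjustment above: for the anti-dependence case where
   an fpload targets a source of a preceding FP arithmetic insn whose
   default latency is, say, 3 cycles, the cost returned is 3 - 1 == 2,
   so the load may issue one cycle before the arithmetic op finishes.
   (Latency value illustrative; real values come from the scheduling
   descriptions.)  */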
4995 /* Adjust scheduling priorities. We use this to try and keep addil
4996 and the next use of %r1 close together. */
4997 static int
4998 pa_adjust_priority (rtx_insn *insn, int priority)
5000 rtx set = single_set (insn);
5001 rtx src, dest;
5002 if (set)
5004 src = SET_SRC (set);
5005 dest = SET_DEST (set);
5006 if (GET_CODE (src) == LO_SUM
5007 && symbolic_operand (XEXP (src, 1), VOIDmode)
5008 && ! read_only_operand (XEXP (src, 1), VOIDmode))
5009 priority >>= 3;
5011 else if (GET_CODE (src) == MEM
5012 && GET_CODE (XEXP (src, 0)) == LO_SUM
5013 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
5014 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
5015 priority >>= 1;
5017 else if (GET_CODE (dest) == MEM
5018 && GET_CODE (XEXP (dest, 0)) == LO_SUM
5019 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
5020 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
5021 priority >>= 3;
5023 return priority;
5026 /* The 700 can only issue a single insn at a time.
5027 The 7XXX processors can issue two insns at a time.
5028 The 8000 can issue 4 insns at a time. */
5029 static int
5030 pa_issue_rate (void)
5032 switch (pa_cpu)
5034 case PROCESSOR_700: return 1;
5035 case PROCESSOR_7100: return 2;
5036 case PROCESSOR_7100LC: return 2;
5037 case PROCESSOR_7200: return 2;
5038 case PROCESSOR_7300: return 2;
5039 case PROCESSOR_8000: return 4;
5041 default:
5042 gcc_unreachable ();
5048 /* Return any length plus adjustment needed by INSN which already has
5049 its length computed as LENGTH. Return LENGTH if no adjustment is
5050 necessary.
5052 Also compute the length of an inline block move here as it is too
5053 complicated to express as a length attribute in pa.md. */
5054 int
5055 pa_adjust_insn_length (rtx_insn *insn, int length)
5057 rtx pat = PATTERN (insn);
5059 /* If length is negative or undefined, provide initial length. */
5060 if ((unsigned int) length >= INT_MAX)
5062 if (GET_CODE (pat) == SEQUENCE)
5063 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5065 switch (get_attr_type (insn))
5067 case TYPE_MILLI:
5068 length = pa_attr_length_millicode_call (insn);
5069 break;
5070 case TYPE_CALL:
5071 length = pa_attr_length_call (insn, 0);
5072 break;
5073 case TYPE_SIBCALL:
5074 length = pa_attr_length_call (insn, 1);
5075 break;
5076 case TYPE_DYNCALL:
5077 length = pa_attr_length_indirect_call (insn);
5078 break;
5079 case TYPE_SH_FUNC_ADRS:
5080 length = pa_attr_length_millicode_call (insn) + 20;
5081 break;
5082 default:
5083 gcc_unreachable ();
5087 /* Block move pattern. */
5088 if (NONJUMP_INSN_P (insn)
5089 && GET_CODE (pat) == PARALLEL
5090 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5091 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5092 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5093 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5094 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5095 length += compute_movmem_length (insn) - 4;
5096 /* Block clear pattern. */
5097 else if (NONJUMP_INSN_P (insn)
5098 && GET_CODE (pat) == PARALLEL
5099 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5100 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5101 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5102 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5103 length += compute_clrmem_length (insn) - 4;
5104 /* Conditional branch with an unfilled delay slot. */
5105 else if (JUMP_P (insn) && ! simplejump_p (insn))
5107 /* Adjust a short backwards conditional with an unfilled delay slot. */
5108 if (GET_CODE (pat) == SET
5109 && length == 4
5110 && JUMP_LABEL (insn) != NULL_RTX
5111 && ! forward_branch_p (insn))
5112 length += 4;
5113 else if (GET_CODE (pat) == PARALLEL
5114 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5115 && length == 4)
5116 length += 4;
5117 /* Adjust dbra insn with short backwards conditional branch with
5118 unfilled delay slot -- only for the case where the counter is in a
5119 general register.  */
5120 else if (GET_CODE (pat) == PARALLEL
5121 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5122 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5123 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5124 && length == 4
5125 && ! forward_branch_p (insn))
5126 length += 4;
5128 return length;
5131 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5133 static bool
5134 pa_print_operand_punct_valid_p (unsigned char code)
5136 if (code == '@'
5137 || code == '#'
5138 || code == '*'
5139 || code == '^')
5140 return true;
5142 return false;
5145 /* Print operand X (an rtx) in assembler syntax to file FILE.
5146 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5147 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5149 void
5150 pa_print_operand (FILE *file, rtx x, int code)
5152 switch (code)
5154 case '#':
5155 /* Output a 'nop' if there's nothing for the delay slot. */
5156 if (dbr_sequence_length () == 0)
5157 fputs ("\n\tnop", file);
5158 return;
5159 case '*':
5160 /* Output a nullification completer if there's nothing for the
5161 delay slot or nullification is requested.  */
5162 if (dbr_sequence_length () == 0 ||
5163 (final_sequence &&
5164 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5165 fputs (",n", file);
5166 return;
5167 case 'R':
5168 /* Print out the second register name of a register pair.
5169 I.e., R (6) => 7. */
5170 fputs (reg_names[REGNO (x) + 1], file);
5171 return;
5172 case 'r':
5173 /* A register or zero. */
5174 if (x == const0_rtx
5175 || (x == CONST0_RTX (DFmode))
5176 || (x == CONST0_RTX (SFmode)))
5178 fputs ("%r0", file);
5179 return;
5181 else
5182 break;
5183 case 'f':
5184 /* A register or zero (floating point). */
5185 if (x == const0_rtx
5186 || (x == CONST0_RTX (DFmode))
5187 || (x == CONST0_RTX (SFmode)))
5189 fputs ("%fr0", file);
5190 return;
5192 else
5193 break;
5194 case 'A':
5196 rtx xoperands[2];
5198 xoperands[0] = XEXP (XEXP (x, 0), 0);
5199 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5200 pa_output_global_address (file, xoperands[1], 0);
5201 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5202 return;
5205 case 'C': /* Plain (C)ondition */
5206 case 'X':
5207 switch (GET_CODE (x))
5209 case EQ:
5210 fputs ("=", file); break;
5211 case NE:
5212 fputs ("<>", file); break;
5213 case GT:
5214 fputs (">", file); break;
5215 case GE:
5216 fputs (">=", file); break;
5217 case GEU:
5218 fputs (">>=", file); break;
5219 case GTU:
5220 fputs (">>", file); break;
5221 case LT:
5222 fputs ("<", file); break;
5223 case LE:
5224 fputs ("<=", file); break;
5225 case LEU:
5226 fputs ("<<=", file); break;
5227 case LTU:
5228 fputs ("<<", file); break;
5229 default:
5230 gcc_unreachable ();
5232 return;
5233 case 'N': /* Condition, (N)egated */
5234 switch (GET_CODE (x))
5236 case EQ:
5237 fputs ("<>", file); break;
5238 case NE:
5239 fputs ("=", file); break;
5240 case GT:
5241 fputs ("<=", file); break;
5242 case GE:
5243 fputs ("<", file); break;
5244 case GEU:
5245 fputs ("<<", file); break;
5246 case GTU:
5247 fputs ("<<=", file); break;
5248 case LT:
5249 fputs (">=", file); break;
5250 case LE:
5251 fputs (">", file); break;
5252 case LEU:
5253 fputs (">>", file); break;
5254 case LTU:
5255 fputs (">>=", file); break;
5256 default:
5257 gcc_unreachable ();
5259 return;
5260 /* For floating point comparisons.  Note that the output
5261 predicates are the complement of the desired condition.  The
5262 conditions for GT, GE, LT, LE and LTGT cause an invalid
5263 operation exception if the result is unordered and this
5264 exception is enabled in the floating-point status register. */
5265 case 'Y':
5266 switch (GET_CODE (x))
5268 case EQ:
5269 fputs ("!=", file); break;
5270 case NE:
5271 fputs ("=", file); break;
5272 case GT:
5273 fputs ("!>", file); break;
5274 case GE:
5275 fputs ("!>=", file); break;
5276 case LT:
5277 fputs ("!<", file); break;
5278 case LE:
5279 fputs ("!<=", file); break;
5280 case LTGT:
5281 fputs ("!<>", file); break;
5282 case UNLE:
5283 fputs ("!?<=", file); break;
5284 case UNLT:
5285 fputs ("!?<", file); break;
5286 case UNGE:
5287 fputs ("!?>=", file); break;
5288 case UNGT:
5289 fputs ("!?>", file); break;
5290 case UNEQ:
5291 fputs ("!?=", file); break;
5292 case UNORDERED:
5293 fputs ("!?", file); break;
5294 case ORDERED:
5295 fputs ("?", file); break;
5296 default:
5297 gcc_unreachable ();
5299 return;
5300 case 'S': /* Condition, operands are (S)wapped. */
5301 switch (GET_CODE (x))
5303 case EQ:
5304 fputs ("=", file); break;
5305 case NE:
5306 fputs ("<>", file); break;
5307 case GT:
5308 fputs ("<", file); break;
5309 case GE:
5310 fputs ("<=", file); break;
5311 case GEU:
5312 fputs ("<<=", file); break;
5313 case GTU:
5314 fputs ("<<", file); break;
5315 case LT:
5316 fputs (">", file); break;
5317 case LE:
5318 fputs (">=", file); break;
5319 case LEU:
5320 fputs (">>=", file); break;
5321 case LTU:
5322 fputs (">>", file); break;
5323 default:
5324 gcc_unreachable ();
5326 return;
5327 case 'B': /* Condition, (B)oth swapped and negate. */
5328 switch (GET_CODE (x))
5330 case EQ:
5331 fputs ("<>", file); break;
5332 case NE:
5333 fputs ("=", file); break;
5334 case GT:
5335 fputs (">=", file); break;
5336 case GE:
5337 fputs (">", file); break;
5338 case GEU:
5339 fputs (">>", file); break;
5340 case GTU:
5341 fputs (">>=", file); break;
5342 case LT:
5343 fputs ("<=", file); break;
5344 case LE:
5345 fputs ("<", file); break;
5346 case LEU:
5347 fputs ("<<", file); break;
5348 case LTU:
5349 fputs ("<<=", file); break;
5350 default:
5351 gcc_unreachable ();
5353 return;
5354 case 'k':
5355 gcc_assert (GET_CODE (x) == CONST_INT);
5356 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5357 return;
5358 case 'Q':
5359 gcc_assert (GET_CODE (x) == CONST_INT);
5360 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5361 return;
5362 case 'L':
5363 gcc_assert (GET_CODE (x) == CONST_INT);
5364 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5365 return;
5366 case 'o':
5367 gcc_assert (GET_CODE (x) == CONST_INT
5368 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5369 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5370 return;
5371 case 'O':
5372 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5373 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5374 return;
5375 case 'p':
5376 gcc_assert (GET_CODE (x) == CONST_INT);
5377 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5378 return;
5379 case 'P':
5380 gcc_assert (GET_CODE (x) == CONST_INT);
5381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5382 return;
5383 case 'I':
5384 if (GET_CODE (x) == CONST_INT)
5385 fputs ("i", file);
5386 return;
5387 case 'M':
5388 case 'F':
5389 switch (GET_CODE (XEXP (x, 0)))
5391 case PRE_DEC:
5392 case PRE_INC:
5393 if (ASSEMBLER_DIALECT == 0)
5394 fputs ("s,mb", file);
5395 else
5396 fputs (",mb", file);
5397 break;
5398 case POST_DEC:
5399 case POST_INC:
5400 if (ASSEMBLER_DIALECT == 0)
5401 fputs ("s,ma", file);
5402 else
5403 fputs (",ma", file);
5404 break;
5405 case PLUS:
5406 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5407 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5409 if (ASSEMBLER_DIALECT == 0)
5410 fputs ("x", file);
5412 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5413 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5415 if (ASSEMBLER_DIALECT == 0)
5416 fputs ("x,s", file);
5417 else
5418 fputs (",s", file);
5420 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5421 fputs ("s", file);
5422 break;
5423 default:
5424 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5425 fputs ("s", file);
5426 break;
5428 return;
5429 case 'G':
5430 pa_output_global_address (file, x, 0);
5431 return;
5432 case 'H':
5433 pa_output_global_address (file, x, 1);
5434 return;
5435 case 0: /* Don't do anything special */
5436 break;
5437 case 'Z':
5439 unsigned op[3];
5440 compute_zdepwi_operands (INTVAL (x), op);
5441 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5442 return;
5444 case 'z':
5446 unsigned op[3];
5447 compute_zdepdi_operands (INTVAL (x), op);
5448 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5449 return;
5451 case 'c':
5452 /* We can get here from a .vtable_inherit due to our
5453 CONSTANT_ADDRESS_P rejecting perfectly good constant
5454 addresses. */
5455 break;
5456 default:
5457 gcc_unreachable ();
5459 if (GET_CODE (x) == REG)
5461 fputs (reg_names [REGNO (x)], file);
5462 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5464 fputs ("R", file);
5465 return;
5467 if (FP_REG_P (x)
5468 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5469 && (REGNO (x) & 1) == 0)
5470 fputs ("L", file);
5472 else if (GET_CODE (x) == MEM)
5474 int size = GET_MODE_SIZE (GET_MODE (x));
5475 rtx base = NULL_RTX;
5476 switch (GET_CODE (XEXP (x, 0)))
5478 case PRE_DEC:
5479 case POST_DEC:
5480 base = XEXP (XEXP (x, 0), 0);
5481 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5482 break;
5483 case PRE_INC:
5484 case POST_INC:
5485 base = XEXP (XEXP (x, 0), 0);
5486 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5487 break;
5488 case PLUS:
5489 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5490 fprintf (file, "%s(%s)",
5491 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5492 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5493 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5494 fprintf (file, "%s(%s)",
5495 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5496 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5497 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5498 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5500 /* Because the REG_POINTER flag can get lost during reload,
5501 pa_legitimate_address_p canonicalizes the order of the
5502 index and base registers in the combined move patterns. */
5503 rtx base = XEXP (XEXP (x, 0), 1);
5504 rtx index = XEXP (XEXP (x, 0), 0);
5506 fprintf (file, "%s(%s)",
5507 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5509 else
5510 output_address (GET_MODE (x), XEXP (x, 0));
5511 break;
5512 default:
5513 output_address (GET_MODE (x), XEXP (x, 0));
5514 break;
5517 else
5518 output_addr_const (file, x);
5521 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */
5523 void
5524 pa_output_global_address (FILE *file, rtx x, int round_constant)
5527 /* Imagine (high (const (plus ...))). */
5528 if (GET_CODE (x) == HIGH)
5529 x = XEXP (x, 0);
5531 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5532 output_addr_const (file, x);
5533 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5535 output_addr_const (file, x);
5536 fputs ("-$global$", file);
5538 else if (GET_CODE (x) == CONST)
5540 const char *sep = "";
5541 int offset = 0; /* assembler wants -$global$ at end */
5542 rtx base = NULL_RTX;
5544 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5546 case LABEL_REF:
5547 case SYMBOL_REF:
5548 base = XEXP (XEXP (x, 0), 0);
5549 output_addr_const (file, base);
5550 break;
5551 case CONST_INT:
5552 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5553 break;
5554 default:
5555 gcc_unreachable ();
5558 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5560 case LABEL_REF:
5561 case SYMBOL_REF:
5562 base = XEXP (XEXP (x, 0), 1);
5563 output_addr_const (file, base);
5564 break;
5565 case CONST_INT:
5566 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5567 break;
5568 default:
5569 gcc_unreachable ();
5572 /* How bogus. The compiler is apparently responsible for
5573 rounding the constant if it uses an LR field selector.
5575 The linker and/or assembler seem a better place since
5576 they have to do this kind of thing already.
5578 If we fail to do this, HP's optimizing linker may eliminate
5579 an addil, but not update the ldw/stw/ldo instruction that
5580 uses the result of the addil. */
5581 if (round_constant)
5582 offset = ((offset + 0x1000) & ~0x1fff);
5584 switch (GET_CODE (XEXP (x, 0)))
5586 case PLUS:
5587 if (offset < 0)
5589 offset = -offset;
5590 sep = "-";
5592 else
5593 sep = "+";
5594 break;
5596 case MINUS:
5597 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5598 sep = "-";
5599 break;
5601 default:
5602 gcc_unreachable ();
5605 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5606 fputs ("-$global$", file);
5607 if (offset)
5608 fprintf (file, "%s%d", sep, offset);
5610 else
5611 output_addr_const (file, x);
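/* Worked example of the LR-field rounding above: an offset of 0x2345
   becomes

     (0x2345 + 0x1000) & ~0x1fff == 0x3345 & ~0x1fff == 0x2000

   i.e. the part consumed by addil is rounded to a multiple of 0x2000
   and the remainder is carried by the right-half relocation.  */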
5614 /* Output boilerplate text to appear at the beginning of the file.
5615 There are several possible versions. */
5616 #define aputs(x) fputs(x, asm_out_file)
5617 static inline void
5618 pa_file_start_level (void)
5620 if (TARGET_64BIT)
5621 aputs ("\t.LEVEL 2.0w\n");
5622 else if (TARGET_PA_20)
5623 aputs ("\t.LEVEL 2.0\n");
5624 else if (TARGET_PA_11)
5625 aputs ("\t.LEVEL 1.1\n");
5626 else
5627 aputs ("\t.LEVEL 1.0\n");
5630 static inline void
5631 pa_file_start_space (int sortspace)
5633 aputs ("\t.SPACE $PRIVATE$");
5634 if (sortspace)
5635 aputs (",SORT=16");
5636 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5637 if (flag_tm)
5638 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5639 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5640 "\n\t.SPACE $TEXT$");
5641 if (sortspace)
5642 aputs (",SORT=8");
5643 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5644 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5647 static inline void
5648 pa_file_start_file (int want_version)
5650 if (write_symbols != NO_DEBUG)
5652 output_file_directive (asm_out_file, main_input_filename);
5653 if (want_version)
5654 aputs ("\t.version\t\"01.01\"\n");
5658 static inline void
5659 pa_file_start_mcount (const char *aswhat)
5661 if (profile_flag)
5662 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5665 static void
5666 pa_elf_file_start (void)
5668 pa_file_start_level ();
5669 pa_file_start_mcount ("ENTRY");
5670 pa_file_start_file (0);
5673 static void
5674 pa_som_file_start (void)
5676 pa_file_start_level ();
5677 pa_file_start_space (0);
5678 aputs ("\t.IMPORT $global$,DATA\n"
5679 "\t.IMPORT $$dyncall,MILLICODE\n");
5680 pa_file_start_mcount ("CODE");
5681 pa_file_start_file (0);
5684 static void
5685 pa_linux_file_start (void)
5687 pa_file_start_file (1);
5688 pa_file_start_level ();
5689 pa_file_start_mcount ("CODE");
5692 static void
5693 pa_hpux64_gas_file_start (void)
5695 pa_file_start_level ();
5696 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5697 if (profile_flag)
5698 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5699 #endif
5700 pa_file_start_file (1);
5703 static void
5704 pa_hpux64_hpas_file_start (void)
5706 pa_file_start_level ();
5707 pa_file_start_space (1);
5708 pa_file_start_mcount ("CODE");
5709 pa_file_start_file (0);
5711 #undef aputs
5713 /* Search the deferred plabel list for SYMBOL and return its internal
5714 label. If an entry for SYMBOL is not found, a new entry is created. */
5716 rtx
5717 pa_get_deferred_plabel (rtx symbol)
5719 const char *fname = XSTR (symbol, 0);
5720 size_t i;
5722 /* See if we have already put this function on the list of deferred
5723 plabels.  This list is generally small, so a linear search is not
5724 too ugly.  If it proves too slow, replace it with something faster.  */
5725 for (i = 0; i < n_deferred_plabels; i++)
5726 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5727 break;
5729 /* If the deferred plabel list is empty, or this entry was not found
5730 on the list, create a new entry on the list. */
5731 if (deferred_plabels == NULL || i == n_deferred_plabels)
5733 tree id;
5735 if (deferred_plabels == 0)
5736 deferred_plabels = ggc_alloc<deferred_plabel> ();
5737 else
5738 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5739 deferred_plabels,
5740 n_deferred_plabels + 1);
5742 i = n_deferred_plabels++;
5743 deferred_plabels[i].internal_label = gen_label_rtx ();
5744 deferred_plabels[i].symbol = symbol;
5746 /* Gross. We have just implicitly taken the address of this
5747 function. Mark it in the same manner as assemble_name. */
5748 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5749 if (id)
5750 mark_referenced (id);
5753 return deferred_plabels[i].internal_label;
5756 static void
5757 output_deferred_plabels (void)
5759 size_t i;
5761 /* If we have some deferred plabels, then we need to switch into the
5762 data or readonly data section, and align it to a word boundary
5763 (4 or 8 bytes) before outputting the deferred plabels. */
5764 if (n_deferred_plabels)
5766 switch_to_section (flag_pic ? data_section : readonly_data_section);
5767 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5770 /* Now output the deferred plabels. */
5771 for (i = 0; i < n_deferred_plabels; i++)
5773 targetm.asm_out.internal_label (asm_out_file, "L",
5774 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5775 assemble_integer (deferred_plabels[i].symbol,
5776 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
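/* For illustration: each deferred plabel emitted above is a
   compiler-generated local label followed by one pointer-sized word
   holding the function symbol -- a 4-byte word at 4-byte alignment in
   the 32-bit runtime, an 8-byte word at 8-byte alignment in the
   64-bit runtime -- placed in the data section for PIC code and the
   read-only data section otherwise. */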
5780 /* Initialize optabs to point to emulation routines. */
5782 static void
5783 pa_init_libfuncs (void)
5785 if (HPUX_LONG_DOUBLE_LIBRARY)
5787 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5788 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5789 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5790 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5791 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5792 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5793 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5794 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5795 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5797 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5798 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5799 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5800 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5801 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5802 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5803 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5805 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5806 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5807 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5808 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5810 set_conv_libfunc (sfix_optab, SImode, TFmode,
5811 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5812 : "_U_Qfcnvfxt_quad_to_sgl");
5813 set_conv_libfunc (sfix_optab, DImode, TFmode,
5814 "_U_Qfcnvfxt_quad_to_dbl");
5815 set_conv_libfunc (ufix_optab, SImode, TFmode,
5816 "_U_Qfcnvfxt_quad_to_usgl");
5817 set_conv_libfunc (ufix_optab, DImode, TFmode,
5818 "_U_Qfcnvfxt_quad_to_udbl");
5820 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5821 "_U_Qfcnvxf_sgl_to_quad");
5822 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5823 "_U_Qfcnvxf_dbl_to_quad");
5824 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5825 "_U_Qfcnvxf_usgl_to_quad");
5826 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5827 "_U_Qfcnvxf_udbl_to_quad");
5830 if (TARGET_SYNC_LIBCALL)
5831 init_sync_libfuncs (8);
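/* For illustration: with the HP-UX long-double library these mappings
   mean a TFmode addition such as "long double c = a + b" compiles to
   a call to _U_Qfadd, and a widening conversion from double to long
   double calls _U_Qfcnvff_dbl_to_quad. */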
5834 /* HP's millicode routines mean something special to the assembler.
5835 Keep track of which ones we have used. */
5837 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5838 static void import_milli (enum millicodes);
5839 static char imported[(int) end1000];
5840 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5841 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5842 #define MILLI_START 10
5844 static void
5845 import_milli (enum millicodes code)
5847 char str[sizeof (import_string)];
5849 if (!imported[(int) code])
5851 imported[(int) code] = 1;
5852 strcpy (str, import_string);
5853 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5854 output_asm_insn (str, 0);
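/* For illustration: MILLI_START is the offset of the "...." placeholder
   in IMPORT_STRING, so import_milli (mulI) rewrites the template to

	.IMPORT $$mulI,MILLICODE

   and emits it at most once per translation unit. */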
5858 /* The register constraints have put the operands and return value in
5859 the proper registers. */
5861 const char *
5862 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5864 import_milli (mulI);
5865 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5868 /* Emit the rtl for doing a division by a constant. */
5870 /* Do magic division millicodes exist for this value? */
5871 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5873 /* We'll use an array to keep track of the magic millicodes and
5874 whether or not we've used them already. [n][0] is signed, [n][1] is
5875 unsigned. */
5877 static int div_milli[16][2];
5880 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5882 if (GET_CODE (operands[2]) == CONST_INT
5883 && INTVAL (operands[2]) > 0
5884 && INTVAL (operands[2]) < 16
5885 && pa_magic_milli[INTVAL (operands[2])])
5887 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5889 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5890 emit
5891 (gen_rtx_PARALLEL
5892 (VOIDmode,
5893 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5894 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5895 SImode,
5896 gen_rtx_REG (SImode, 26),
5897 operands[2])),
5898 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5899 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5900 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5901 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5902 gen_rtx_CLOBBER (VOIDmode, ret))));
5903 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5904 return 1;
5906 return 0;
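/* For illustration: PA_MAGIC_MILLI above marks divisors 3, 5, 6, 7, 9,
   10, 12, 14 and 15 as having dedicated millicode. For those values
   pa_emit_hpdiv_const emits the special division PARALLEL; any other
   divisor falls through and returns 0. */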
5909 const char *
5910 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5912 int divisor;
5914 /* If the divisor is a constant, try to use one of the special
5915 opcodes. */
5916 if (GET_CODE (operands[0]) == CONST_INT)
5918 static char buf[100];
5919 divisor = INTVAL (operands[0]);
5920 if (!div_milli[divisor][unsignedp])
5922 div_milli[divisor][unsignedp] = 1;
5923 if (unsignedp)
5924 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5925 else
5926 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5928 if (unsignedp)
5930 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5931 INTVAL (operands[0]));
5932 return pa_output_millicode_call (insn,
5933 gen_rtx_SYMBOL_REF (SImode, buf));
5935 else
5937 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5938 INTVAL (operands[0]));
5939 return pa_output_millicode_call (insn,
5940 gen_rtx_SYMBOL_REF (SImode, buf));
5943 /* Divisor isn't a special constant. */
5944 else
5946 if (unsignedp)
5948 import_milli (divU);
5949 return pa_output_millicode_call (insn,
5950 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5952 else
5954 import_milli (divI);
5955 return pa_output_millicode_call (insn,
5956 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
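/* For illustration: for a constant divisor such as 7, the routine above
   first emits ".IMPORT $$divI_7,MILLICODE" (once per divisor and
   signedness, tracked in div_milli) and then a millicode call to
   $$divI_7; a register divisor instead goes through import_milli and
   calls the generic $$divI or $$divU routine. */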
5961 /* Output a $$rem millicode to do mod. */
5963 const char *
5964 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5966 if (unsignedp)
5968 import_milli (remU);
5969 return pa_output_millicode_call (insn,
5970 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5972 else
5974 import_milli (remI);
5975 return pa_output_millicode_call (insn,
5976 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5980 void
5981 pa_output_arg_descriptor (rtx_insn *call_insn)
5983 const char *arg_regs[4];
5984 machine_mode arg_mode;
5985 rtx link;
5986 int i, output_flag = 0;
5987 int regno;
5989 /* We neither need nor want argument location descriptors for the
5990 64-bit runtime environment or the ELF32 environment. */
5991 if (TARGET_64BIT || TARGET_ELF32)
5992 return;
5994 for (i = 0; i < 4; i++)
5995 arg_regs[i] = 0;
5997 /* Specify explicitly that no argument relocations should take place
5998 if using the portable runtime calling conventions. */
5999 if (TARGET_PORTABLE_RUNTIME)
6001 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
6002 asm_out_file);
6003 return;
6006 gcc_assert (CALL_P (call_insn));
6007 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
6008 link; link = XEXP (link, 1))
6010 rtx use = XEXP (link, 0);
6012 if (! (GET_CODE (use) == USE
6013 && GET_CODE (XEXP (use, 0)) == REG
6014 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6015 continue;
6017 arg_mode = GET_MODE (XEXP (use, 0));
6018 regno = REGNO (XEXP (use, 0));
6019 if (regno >= 23 && regno <= 26)
6021 arg_regs[26 - regno] = "GR";
6022 if (arg_mode == DImode)
6023 arg_regs[25 - regno] = "GR";
6025 else if (regno >= 32 && regno <= 39)
6027 if (arg_mode == SFmode)
6028 arg_regs[(regno - 32) / 2] = "FR";
6029 else
6031 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
6032 arg_regs[(regno - 34) / 2] = "FR";
6033 arg_regs[(regno - 34) / 2 + 1] = "FU";
6034 #else
6035 arg_regs[(regno - 34) / 2] = "FU";
6036 arg_regs[(regno - 34) / 2 + 1] = "FR";
6037 #endif
6041 fputs ("\t.CALL ", asm_out_file);
6042 for (i = 0; i < 4; i++)
6044 if (arg_regs[i])
6046 if (output_flag++)
6047 fputc (',', asm_out_file);
6048 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6051 fputc ('\n', asm_out_file);
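/* For illustration, a minimal sketch: a 32-bit SOM call passing two
   ints in %r26 and %r25 is annotated as

	.CALL ARGW0=GR,ARGW1=GR

   whereas the portable runtime always gets the fixed
   ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO form above. */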
6054 /* Inform reload about cases where moving X with a mode MODE to or from
6055 a register in RCLASS requires an extra scratch or immediate register.
6056 Return the class needed for the immediate register. */
6058 static reg_class_t
6059 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6060 machine_mode mode, secondary_reload_info *sri)
6062 int regno;
6063 enum reg_class rclass = (enum reg_class) rclass_i;
6065 /* Handle the easy stuff first. */
6066 if (rclass == R1_REGS)
6067 return NO_REGS;
6069 if (REG_P (x))
6071 regno = REGNO (x);
6072 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6073 return NO_REGS;
6075 else
6076 regno = -1;
6078 /* If we have something like (mem (mem (...))), we can safely assume the
6079 inner MEM will end up in a general register after reloading, so there's
6080 no need for a secondary reload. */
6081 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6082 return NO_REGS;
6084 /* Trying to load a constant into an FP register during PIC code
6085 generation requires %r1 as a scratch register. For float modes,
6086 the only legitimate constant is CONST0_RTX. However, there are
6087 a few patterns that accept constant double operands. */
6088 if (flag_pic
6089 && FP_REG_CLASS_P (rclass)
6090 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6092 switch (mode)
6094 case E_SImode:
6095 sri->icode = CODE_FOR_reload_insi_r1;
6096 break;
6098 case E_DImode:
6099 sri->icode = CODE_FOR_reload_indi_r1;
6100 break;
6102 case E_SFmode:
6103 sri->icode = CODE_FOR_reload_insf_r1;
6104 break;
6106 case E_DFmode:
6107 sri->icode = CODE_FOR_reload_indf_r1;
6108 break;
6110 default:
6111 gcc_unreachable ();
6113 return NO_REGS;
6116 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6117 register when we're generating PIC code or when the operand isn't
6118 readonly. */
6119 if (pa_symbolic_expression_p (x))
6121 if (GET_CODE (x) == HIGH)
6122 x = XEXP (x, 0);
6124 if (flag_pic || !read_only_operand (x, VOIDmode))
6126 switch (mode)
6128 case E_SImode:
6129 sri->icode = CODE_FOR_reload_insi_r1;
6130 break;
6132 case E_DImode:
6133 sri->icode = CODE_FOR_reload_indi_r1;
6134 break;
6136 default:
6137 gcc_unreachable ();
6139 return NO_REGS;
6143 /* Profiling showed the PA port spends about 1.3% of its compilation
6144 time in true_regnum from calls inside pa_secondary_reload_class. */
6145 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6146 regno = true_regnum (x);
6148 /* Handle reloads for floating point loads and stores. */
6149 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6150 && FP_REG_CLASS_P (rclass))
6152 if (MEM_P (x))
6154 x = XEXP (x, 0);
6156 /* We don't need a secondary reload for indexed memory addresses.
6158 When INT14_OK_STRICT is true, it might appear that we could
6159 directly allow register indirect memory addresses. However,
6160 this doesn't work because we don't support SUBREGs in
6161 floating-point register copies and reload doesn't tell us
6162 when it's going to use a SUBREG. */
6163 if (IS_INDEX_ADDR_P (x))
6164 return NO_REGS;
6167 /* Request a secondary reload with a general scratch register
6168 for everything else. ??? Could symbolic operands be handled
6169 directly when generating non-pic PA 2.0 code? */
6170 sri->icode = (in_p
6171 ? direct_optab_handler (reload_in_optab, mode)
6172 : direct_optab_handler (reload_out_optab, mode));
6173 return NO_REGS;
6176 /* A SAR<->FP register copy requires an intermediate general register
6177 and secondary memory. We need a secondary reload with a general
6178 scratch register for spills. */
6179 if (rclass == SHIFT_REGS)
6181 /* Handle spill. */
6182 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6184 sri->icode = (in_p
6185 ? direct_optab_handler (reload_in_optab, mode)
6186 : direct_optab_handler (reload_out_optab, mode));
6187 return NO_REGS;
6190 /* Handle FP copy. */
6191 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6192 return GENERAL_REGS;
6195 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6196 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6197 && FP_REG_CLASS_P (rclass))
6198 return GENERAL_REGS;
6200 return NO_REGS;
6203 /* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
6205 static bool
6206 pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED,
6207 reg_class_t class1 ATTRIBUTE_UNUSED,
6208 reg_class_t class2 ATTRIBUTE_UNUSED)
6210 #ifdef PA_SECONDARY_MEMORY_NEEDED
6211 return PA_SECONDARY_MEMORY_NEEDED (mode, class1, class2);
6212 #else
6213 return false;
6214 #endif
6217 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6218 is only marked as live on entry by df-scan when it is a fixed
6219 register. It isn't a fixed register in the 64-bit runtime,
6220 so we need to mark it here. */
6222 static void
6223 pa_extra_live_on_entry (bitmap regs)
6225 if (TARGET_64BIT)
6226 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6229 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6230 to prevent it from being deleted. */
6233 pa_eh_return_handler_rtx (void)
6235 rtx tmp;
6237 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6238 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6239 tmp = gen_rtx_MEM (word_mode, tmp);
6240 tmp->volatil = 1;
6241 return tmp;
6244 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6245 by invisible reference. As a GCC extension, we also pass anything
6246 with a zero or variable size by reference.
6248 The 64-bit runtime does not describe passing any types by invisible
6249 reference. The internals of GCC can't currently handle passing
6250 empty structures and zero or variable length arrays when they are
6251 not passed entirely on the stack or by reference. Thus, as a GCC
6252 extension, we pass these types by reference. The HP compiler doesn't
6253 support these types, so hopefully there shouldn't be any compatibility
6254 issues. This may have to be revisited when HP releases a C99 compiler
6255 or updates the ABI. */
6257 static bool
6258 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6259 machine_mode mode, const_tree type,
6260 bool named ATTRIBUTE_UNUSED)
6262 HOST_WIDE_INT size;
6264 if (type)
6265 size = int_size_in_bytes (type);
6266 else
6267 size = GET_MODE_SIZE (mode);
6269 if (TARGET_64BIT)
6270 return size <= 0;
6271 else
6272 return size <= 0 || size > 8;
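/* For illustration: under these rules a 12-byte struct is passed by
   reference in the 32-bit runtime (size > 8) but by value in the
   64-bit runtime, while a zero-sized struct is passed by reference in
   both runtimes. */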
6275 /* Implement TARGET_FUNCTION_ARG_PADDING. */
6277 static pad_direction
6278 pa_function_arg_padding (machine_mode mode, const_tree type)
6280 if (mode == BLKmode
6281 || (TARGET_64BIT
6282 && type
6283 && (AGGREGATE_TYPE_P (type)
6284 || TREE_CODE (type) == COMPLEX_TYPE
6285 || TREE_CODE (type) == VECTOR_TYPE)))
6287 /* Return PAD_NONE if justification is not required. */
6288 if (type
6289 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6290 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6291 return PAD_NONE;
6293 /* The directions set here are ignored when a BLKmode argument larger
6294 than a word is placed in a register. Different code is used for
6295 the stack and registers. This makes it difficult to have a
6296 consistent data representation for both the stack and registers.
6297 For both runtimes, the justification and padding for arguments on
6298 the stack and in registers should be identical. */
6299 if (TARGET_64BIT)
6300 /* The 64-bit runtime specifies left justification for aggregates. */
6301 return PAD_UPWARD;
6302 else
6303 /* The 32-bit runtime architecture specifies right justification.
6304 When the argument is passed on the stack, the argument is padded
6305 with garbage on the left. The HP compiler pads with zeros. */
6306 return PAD_DOWNWARD;
6309 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6310 return PAD_DOWNWARD;
6311 else
6312 return PAD_NONE;
6316 /* Do what is necessary for `va_start'. We look at the current function
6317 to determine if stdargs or varargs is used and fill in an initial
6318 va_list. A pointer to this constructor is returned. */
6320 static rtx
6321 hppa_builtin_saveregs (void)
6323 rtx offset, dest;
6324 tree fntype = TREE_TYPE (current_function_decl);
6325 int argadj = ((!stdarg_p (fntype))
6326 ? UNITS_PER_WORD : 0);
6328 if (argadj)
6329 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6330 else
6331 offset = crtl->args.arg_offset_rtx;
6333 if (TARGET_64BIT)
6335 int i, off;
6337 /* Adjust for varargs/stdarg differences. */
6338 if (argadj)
6339 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6340 else
6341 offset = crtl->args.arg_offset_rtx;
6343 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6344 from the incoming arg pointer and growing to larger addresses. */
6345 for (i = 26, off = -64; i >= 19; i--, off += 8)
6346 emit_move_insn (gen_rtx_MEM (word_mode,
6347 plus_constant (Pmode,
6348 arg_pointer_rtx, off)),
6349 gen_rtx_REG (word_mode, i));
6351 /* The incoming args pointer points just beyond the flushback area;
6352 normally this is not a serious concern. However, when we are doing
6353 varargs/stdargs we want to make the arg pointer point to the start
6354 of the incoming argument area. */
6355 emit_move_insn (virtual_incoming_args_rtx,
6356 plus_constant (Pmode, arg_pointer_rtx, -64));
6358 /* Now return a pointer to the first anonymous argument. */
6359 return copy_to_reg (expand_binop (Pmode, add_optab,
6360 virtual_incoming_args_rtx,
6361 offset, 0, 0, OPTAB_LIB_WIDEN));
6364 /* Store general registers on the stack. */
6365 dest = gen_rtx_MEM (BLKmode,
6366 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6367 -16));
6368 set_mem_alias_set (dest, get_varargs_alias_set ());
6369 set_mem_align (dest, BITS_PER_WORD);
6370 move_block_from_reg (23, dest, 4);
6372 /* move_block_from_reg will emit code to store the argument registers
6373 individually as scalar stores.
6375 However, other insns may later load from the same addresses for
6376 a structure load (passing a struct to a varargs routine).
6378 The alias code assumes that such aliasing can never happen, so we
6379 have to keep memory referencing insns from moving up beyond the
6380 last argument register store. So we emit a blockage insn here. */
6381 emit_insn (gen_blockage ());
6383 return copy_to_reg (expand_binop (Pmode, add_optab,
6384 crtl->args.internal_arg_pointer,
6385 offset, 0, 0, OPTAB_LIB_WIDEN));
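/* For illustration: in the 64-bit case above, %r26 down to %r19 are
   spilled at offsets -64, -56, ..., -8 from the incoming argument
   pointer, so after virtual_incoming_args_rtx is adjusted the
   anonymous arguments can be walked as ordinary stack words. */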
6388 static void
6389 hppa_va_start (tree valist, rtx nextarg)
6391 nextarg = expand_builtin_saveregs ();
6392 std_expand_builtin_va_start (valist, nextarg);
6395 static tree
6396 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6397 gimple_seq *post_p)
6399 if (TARGET_64BIT)
6401 /* Args grow upward. We can use the generic routines. */
6402 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6404 else /* !TARGET_64BIT */
6406 tree ptr = build_pointer_type (type);
6407 tree valist_type;
6408 tree t, u;
6409 unsigned int size, ofs;
6410 bool indirect;
6412 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6413 if (indirect)
6415 type = ptr;
6416 ptr = build_pointer_type (type);
6418 size = int_size_in_bytes (type);
6419 valist_type = TREE_TYPE (valist);
6421 /* Args grow down. Not handled by generic routines. */
6423 u = fold_convert (sizetype, size_in_bytes (type));
6424 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6425 t = fold_build_pointer_plus (valist, u);
6427 /* Align to 4 or 8 byte boundary depending on argument size. */
6429 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6430 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6431 t = fold_convert (valist_type, t);
6433 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6435 ofs = (8 - size) % 4;
6436 if (ofs != 0)
6437 t = fold_build_pointer_plus_hwi (t, ofs);
6439 t = fold_convert (ptr, t);
6440 t = build_va_arg_indirect_ref (t);
6442 if (indirect)
6443 t = build_va_arg_indirect_ref (t);
6445 return t;
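/* For illustration, a worked example of the 32-bit va_arg arithmetic
   above: for a 6-byte argument, valist is moved down by 6, rounded
   down to an 8-byte boundary (size > 4), then advanced by
   (8 - 6) % 4 == 2 so the right-justified value is read from its
   slot; a 1-byte argument uses a 4-byte boundary and an offset of
   (8 - 1) % 4 == 3. */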
6449 /* True if MODE is valid for the target. By "valid", we mean able to
6450 be manipulated in non-trivial ways. In particular, this means all
6451 the arithmetic is supported.
6453 Currently, TImode is not valid as the HP 64-bit runtime documentation
6454 doesn't specify the alignment and calling conventions for this type.
6455 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6456 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6458 static bool
6459 pa_scalar_mode_supported_p (scalar_mode mode)
6461 int precision = GET_MODE_PRECISION (mode);
6463 switch (GET_MODE_CLASS (mode))
6465 case MODE_PARTIAL_INT:
6466 case MODE_INT:
6467 if (precision == CHAR_TYPE_SIZE)
6468 return true;
6469 if (precision == SHORT_TYPE_SIZE)
6470 return true;
6471 if (precision == INT_TYPE_SIZE)
6472 return true;
6473 if (precision == LONG_TYPE_SIZE)
6474 return true;
6475 if (precision == LONG_LONG_TYPE_SIZE)
6476 return true;
6477 return false;
6479 case MODE_FLOAT:
6480 if (precision == FLOAT_TYPE_SIZE)
6481 return true;
6482 if (precision == DOUBLE_TYPE_SIZE)
6483 return true;
6484 if (precision == LONG_DOUBLE_TYPE_SIZE)
6485 return true;
6486 return false;
6488 case MODE_DECIMAL_FLOAT:
6489 return false;
6491 default:
6492 gcc_unreachable ();
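/* For illustration, assuming the usual ILP32 type sizes: the switch
   above accepts QImode through DImode for the integer classes and
   SFmode/DFmode for floats; TFmode is accepted only where
   LONG_DOUBLE_TYPE_SIZE is 128, as on HP-UX. TImode matches no C
   type size and is rejected, as described above. */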
6496 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6497 it branches into the delay slot. Otherwise, return FALSE. */
6499 static bool
6500 branch_to_delay_slot_p (rtx_insn *insn)
6502 rtx_insn *jump_insn;
6504 if (dbr_sequence_length ())
6505 return FALSE;
6507 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6508 while (insn)
6510 insn = next_active_insn (insn);
6511 if (jump_insn == insn)
6512 return TRUE;
6514 /* We can't rely on the length of asms. So, we return FALSE when
6515 the branch is followed by an asm. */
6516 if (!insn
6517 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6518 || asm_noperands (PATTERN (insn)) >= 0
6519 || get_attr_length (insn) > 0)
6520 break;
6523 return FALSE;
6526 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6528 This occurs when INSN has an unfilled delay slot and is followed
6529 by an asm. Disaster can occur if the asm is empty and the jump
6530 branches into the delay slot. So, we add a nop in the delay slot
6531 when this occurs. */
6533 static bool
6534 branch_needs_nop_p (rtx_insn *insn)
6536 rtx_insn *jump_insn;
6538 if (dbr_sequence_length ())
6539 return FALSE;
6541 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6542 while (insn)
6544 insn = next_active_insn (insn);
6545 if (!insn || jump_insn == insn)
6546 return TRUE;
6548 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6549 || asm_noperands (PATTERN (insn)) >= 0)
6550 && get_attr_length (insn) > 0)
6551 break;
6554 return FALSE;
6557 /* Return TRUE if INSN, a forward jump insn, can use nullification
6558 to skip the following instruction. This avoids an extra cycle due
6559 to a mis-predicted branch when we fall through. */
6561 static bool
6562 use_skip_p (rtx_insn *insn)
6564 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6566 while (insn)
6568 insn = next_active_insn (insn);
6570 /* We can't rely on the length of asms, so we can't skip asms. */
6571 if (!insn
6572 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6573 || asm_noperands (PATTERN (insn)) >= 0)
6574 break;
6575 if (get_attr_length (insn) == 4
6576 && jump_insn == next_active_insn (insn))
6577 return TRUE;
6578 if (get_attr_length (insn) > 0)
6579 break;
6582 return FALSE;
6585 /* This routine handles all the normal conditional branch sequences we
6586 might need to generate. It handles compare immediate vs compare
6587 register, nullification of delay slots, varying length branches,
6588 negated branches, and all combinations of the above. It returns the
6589 appropriate output to emit the branch corresponding to all given
6590 parameters. */
6592 const char *
6593 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6595 static char buf[100];
6596 bool useskip;
6597 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6598 int length = get_attr_length (insn);
6599 int xdelay;
6601 /* A conditional branch to the following instruction (e.g. the delay slot)
6602 is asking for a disaster. This can happen when not optimizing and
6603 when jump optimization fails.
6605 While it is usually safe to emit nothing, this can fail if the
6606 preceding instruction is a nullified branch with an empty delay
6607 slot and the same branch target as this branch. We could check
6608 for this but jump optimization should eliminate nop jumps. It
6609 is always safe to emit a nop. */
6610 if (branch_to_delay_slot_p (insn))
6611 return "nop";
6613 /* The doubleword form of the cmpib instruction doesn't have the LEU
6614 and GTU conditions while the cmpb instruction does. Since we accept
6615 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6616 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6617 operands[2] = gen_rtx_REG (DImode, 0);
6618 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6619 operands[1] = gen_rtx_REG (DImode, 0);
6621 /* If this is a long branch with its delay slot unfilled, set `nullify'
6622 as it can nullify the delay slot and save a nop. */
6623 if (length == 8 && dbr_sequence_length () == 0)
6624 nullify = 1;
6626 /* If this is a short forward conditional branch which did not get
6627 its delay slot filled, the delay slot can still be nullified. */
6628 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6629 nullify = forward_branch_p (insn);
6631 /* A forward branch over a single nullified insn can be done with a
6632 comclr instruction. This avoids a single cycle penalty due to
6633 mis-predicted branch if we fall through (branch not taken). */
6634 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6636 switch (length)
6638 /* All short conditional branches except backwards with an unfilled
6639 delay slot. */
6640 case 4:
6641 if (useskip)
6642 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6643 else
6644 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6645 if (GET_MODE (operands[1]) == DImode)
6646 strcat (buf, "*");
6647 if (negated)
6648 strcat (buf, "%B3");
6649 else
6650 strcat (buf, "%S3");
6651 if (useskip)
6652 strcat (buf, " %2,%r1,%%r0");
6653 else if (nullify)
6655 if (branch_needs_nop_p (insn))
6656 strcat (buf, ",n %2,%r1,%0%#");
6657 else
6658 strcat (buf, ",n %2,%r1,%0");
6660 else
6661 strcat (buf, " %2,%r1,%0");
6662 break;
6664 /* All long conditionals. Note a short backward branch with an
6665 unfilled delay slot is treated just like a long backward branch
6666 with an unfilled delay slot. */
6667 case 8:
6668 /* Handle weird backwards branch with a filled delay slot
6669 which is nullified. */
6670 if (dbr_sequence_length () != 0
6671 && ! forward_branch_p (insn)
6672 && nullify)
6674 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6675 if (GET_MODE (operands[1]) == DImode)
6676 strcat (buf, "*");
6677 if (negated)
6678 strcat (buf, "%S3");
6679 else
6680 strcat (buf, "%B3");
6681 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6683 /* Handle short backwards branch with an unfilled delay slot.
6684 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6685 taken and untaken branches. */
6686 else if (dbr_sequence_length () == 0
6687 && ! forward_branch_p (insn)
6688 && INSN_ADDRESSES_SET_P ()
6689 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6690 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6692 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6693 if (GET_MODE (operands[1]) == DImode)
6694 strcat (buf, "*");
6695 if (negated)
6696 strcat (buf, "%B3 %2,%r1,%0%#");
6697 else
6698 strcat (buf, "%S3 %2,%r1,%0%#");
6700 else
6702 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6703 if (GET_MODE (operands[1]) == DImode)
6704 strcat (buf, "*");
6705 if (negated)
6706 strcat (buf, "%S3");
6707 else
6708 strcat (buf, "%B3");
6709 if (nullify)
6710 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6711 else
6712 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6714 break;
6716 default:
6717 /* The reversed conditional branch must branch over one additional
6718 instruction if the delay slot is filled and needs to be extracted
6719 by pa_output_lbranch. If the delay slot is empty or this is a
6720 nullified forward branch, the instruction after the reversed
6721 condition branch must be nullified. */
6722 if (dbr_sequence_length () == 0
6723 || (nullify && forward_branch_p (insn)))
6725 nullify = 1;
6726 xdelay = 0;
6727 operands[4] = GEN_INT (length);
6729 else
6731 xdelay = 1;
6732 operands[4] = GEN_INT (length + 4);
6735 /* Create a reversed conditional branch which branches around
6736 the following insns. */
6737 if (GET_MODE (operands[1]) != DImode)
6739 if (nullify)
6741 if (negated)
6742 strcpy (buf,
6743 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6744 else
6745 strcpy (buf,
6746 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6748 else
6750 if (negated)
6751 strcpy (buf,
6752 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6753 else
6754 strcpy (buf,
6755 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6758 else
6760 if (nullify)
6762 if (negated)
6763 strcpy (buf,
6764 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6765 else
6766 strcpy (buf,
6767 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6769 else
6771 if (negated)
6772 strcpy (buf,
6773 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6774 else
6775 strcpy (buf,
6776 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6780 output_asm_insn (buf, operands);
6781 return pa_output_lbranch (operands[0], insn, xdelay);
6783 return buf;
6786 /* Output a PIC pc-relative instruction sequence to load the address of
6787 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6788 or a code label. OPERANDS[1] specifies the register to use to load
6789 the program counter. OPERANDS[3] may be used for label generation.
6790 The sequence is always three instructions in length. The program
6791 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6792 Register %r1 is clobbered. */
6794 static void
6795 pa_output_pic_pcrel_sequence (rtx *operands)
6797 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6798 if (TARGET_PA_20)
6800 /* We can use mfia to determine the current program counter. */
6801 if (TARGET_SOM || !TARGET_GAS)
6803 operands[3] = gen_label_rtx ();
6804 targetm.asm_out.internal_label (asm_out_file, "L",
6805 CODE_LABEL_NUMBER (operands[3]));
6806 output_asm_insn ("mfia %1", operands);
6807 output_asm_insn ("addil L'%0-%l3,%1", operands);
6808 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6810 else
6812 output_asm_insn ("mfia %1", operands);
6813 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6814 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6817 else
6819 /* We need to use a branch to determine the current program counter. */
6820 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6821 if (TARGET_SOM || !TARGET_GAS)
6823 operands[3] = gen_label_rtx ();
6824 output_asm_insn ("addil L'%0-%l3,%1", operands);
6825 targetm.asm_out.internal_label (asm_out_file, "L",
6826 CODE_LABEL_NUMBER (operands[3]));
6827 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6829 else
6831 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6832 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
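/* For illustration: on PA 2.0 with GAS (and not SOM), loading the
   address of SYM into %r2 with %r1 as the program counter register
   emits

	mfia %r1
	addil L'SYM-$PIC_pcrel$0+12,%r1
	ldo R'SYM-$PIC_pcrel$0+16(%r1),%r2

   where SYM, %r1 and %r2 stand in for OPERANDS[0], OPERANDS[1] and
   OPERANDS[2]. */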
6837 /* This routine handles output of long unconditional branches that
6838 exceed the maximum range of a simple branch instruction. Since
6839 we don't have a register available for the branch, we save register
6840 %r1 in the frame marker, load the branch destination DEST into %r1,
6841 execute the branch, and restore %r1 in the delay slot of the branch.
6843 Since long branches may have an insn in the delay slot and the
6844 delay slot is used to restore %r1, we in general need to extract
6845 this insn and execute it before the branch. However, to facilitate
6846 use of this function by conditional branches, we also provide an
6847 option to not extract the delay insn so that it will be emitted
6848 after the long branch. So, if there is an insn in the delay slot,
6849 it is extracted if XDELAY is nonzero.
6851 The lengths of the various long-branch sequences are 20, 16 and 24
6852 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6854 const char *
6855 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6857 rtx xoperands[4];
6859 xoperands[0] = dest;
6861 /* First, free up the delay slot. */
6862 if (xdelay && dbr_sequence_length () != 0)
6864 /* We can't handle a jump in the delay slot. */
6865 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6867 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6868 optimize, 0, NULL);
6870 /* Now delete the delay insn. */
6871 SET_INSN_DELETED (NEXT_INSN (insn));
6874 /* Output an insn to save %r1. The runtime documentation doesn't
6875 specify whether the "Clean Up" slot in the caller's frame can
6876 be clobbered by the callee. It isn't copied by HP's builtin
6877 alloca, so this suggests that it can be clobbered if necessary.
6878 The "Static Link" location is copied by HP builtin alloca, so
6879 we avoid using it. Using the cleanup slot might be a problem
6880 if we have to interoperate with languages that pass cleanup
6881 information. However, it should be possible to handle these
6882 situations with GCC's asm feature.
6884 The "Current RP" slot is reserved for the called procedure, so
6885 we try to use it when we don't have a frame of our own. It's
6886 rather unlikely that we won't have a frame when we need to emit
6887 a very long branch.
6889 Really the way to go long term is a register scavenger; goto
6890 the target of the jump and find a register which we can use
6891 as a scratch to hold the value in %r1. Then, we wouldn't have
6892 to free up the delay slot or clobber a slot that may be needed
6893 for other purposes. */
6894 if (TARGET_64BIT)
6896 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6897 /* Use the return pointer slot in the frame marker. */
6898 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6899 else
6900 /* Use the slot at -40 in the frame marker since HP builtin
6901 alloca doesn't copy it. */
6902 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6904 else
6906 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6907 /* Use the return pointer slot in the frame marker. */
6908 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6909 else
6910 /* Use the "Clean Up" slot in the frame marker. In GCC,
6911 the only other use of this location is for copying a
6912 floating point double argument from a floating-point
6913 register to two general registers. The copy is done
6914 as an "atomic" operation when outputting a call, so it
6915 won't interfere with our using the location here. */
6916 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6919 if (TARGET_PORTABLE_RUNTIME)
6921 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6922 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6923 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6925 else if (flag_pic)
6927 xoperands[1] = gen_rtx_REG (Pmode, 1);
6928 xoperands[2] = xoperands[1];
6929 pa_output_pic_pcrel_sequence (xoperands);
6930 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6932 else
6933 /* Now output a very long branch to the original target. */
6934 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6936 /* Now restore the value of %r1 in the delay slot. */
6937 if (TARGET_64BIT)
6939 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6940 return "ldd -16(%%r30),%%r1";
6941 else
6942 return "ldd -40(%%r30),%%r1";
6944 else
6946 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6947 return "ldw -20(%%r30),%%r1";
6948 else
6949 return "ldw -12(%%r30),%%r1";
6953 /* This routine handles all the branch-on-bit conditional branch sequences we
6954 might need to generate. It handles nullification of delay slots,
6955 varying length branches, negated branches and all combinations of the
6956 above. It returns the appropriate output template to emit the branch. */
6958 const char *
6959 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6961 static char buf[100];
6962 bool useskip;
6963 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6964 int length = get_attr_length (insn);
6965 int xdelay;
6967 /* A conditional branch to the following instruction (e.g. the delay slot) is
6968 asking for a disaster. I do not think this can happen as this pattern
6969 is only used when optimizing; jump optimization should eliminate the
6970 jump. But be prepared just in case. */
6972 if (branch_to_delay_slot_p (insn))
6973 return "nop";
6975 /* If this is a long branch with its delay slot unfilled, set `nullify'
6976 as it can nullify the delay slot and save a nop. */
6977 if (length == 8 && dbr_sequence_length () == 0)
6978 nullify = 1;
6980 /* If this is a short forward conditional branch which did not get
6981 its delay slot filled, the delay slot can still be nullified. */
6982 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6983 nullify = forward_branch_p (insn);
6985 /* A forward branch over a single nullified insn can be done with an
6986 extrs instruction. This avoids a single cycle penalty due to
6987 mis-predicted branch if we fall through (branch not taken). */
6988 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6990 switch (length)
6993 /* All short conditional branches except backwards with an unfilled
6994 delay slot. */
6995 case 4:
6996 if (useskip)
6997 strcpy (buf, "{extrs,|extrw,s,}");
6998 else
6999 strcpy (buf, "bb,");
7000 if (useskip && GET_MODE (operands[0]) == DImode)
7001 strcpy (buf, "extrd,s,*");
7002 else if (GET_MODE (operands[0]) == DImode)
7003 strcpy (buf, "bb,*");
7004 if ((which == 0 && negated)
7005 || (which == 1 && ! negated))
7006 strcat (buf, ">=");
7007 else
7008 strcat (buf, "<");
7009 if (useskip)
7010 strcat (buf, " %0,%1,1,%%r0");
7011 else if (nullify && negated)
7013 if (branch_needs_nop_p (insn))
7014 strcat (buf, ",n %0,%1,%3%#");
7015 else
7016 strcat (buf, ",n %0,%1,%3");
7018 else if (nullify && ! negated)
7020 if (branch_needs_nop_p (insn))
7021 strcat (buf, ",n %0,%1,%2%#");
7022 else
7023 strcat (buf, ",n %0,%1,%2");
7025 else if (! nullify && negated)
7026 strcat (buf, " %0,%1,%3");
7027 else if (! nullify && ! negated)
7028 strcat (buf, " %0,%1,%2");
7029 break;
7031 /* All long conditionals. Note a short backward branch with an
7032 unfilled delay slot is treated just like a long backward branch
7033 with an unfilled delay slot. */
7034 case 8:
7035 /* Handle weird backwards branch with a filled delay slot
7036 which is nullified. */
7037 if (dbr_sequence_length () != 0
7038 && ! forward_branch_p (insn)
7039 && nullify)
7041 strcpy (buf, "bb,");
7042 if (GET_MODE (operands[0]) == DImode)
7043 strcat (buf, "*");
7044 if ((which == 0 && negated)
7045 || (which == 1 && ! negated))
7046 strcat (buf, "<");
7047 else
7048 strcat (buf, ">=");
7049 if (negated)
7050 strcat (buf, ",n %0,%1,.+12\n\tb %3");
7051 else
7052 strcat (buf, ",n %0,%1,.+12\n\tb %2");
7054 /* Handle short backwards branch with an unfilled delay slot.
7055 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7056 taken and untaken branches. */
7057 else if (dbr_sequence_length () == 0
7058 && ! forward_branch_p (insn)
7059 && INSN_ADDRESSES_SET_P ()
7060 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7061 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7063 strcpy (buf, "bb,");
7064 if (GET_MODE (operands[0]) == DImode)
7065 strcat (buf, "*");
7066 if ((which == 0 && negated)
7067 || (which == 1 && ! negated))
7068 strcat (buf, ">=");
7069 else
7070 strcat (buf, "<");
7071 if (negated)
7072 strcat (buf, " %0,%1,%3%#");
7073 else
7074 strcat (buf, " %0,%1,%2%#");
7076 else
7078 if (GET_MODE (operands[0]) == DImode)
7079 strcpy (buf, "extrd,s,*");
7080 else
7081 strcpy (buf, "{extrs,|extrw,s,}");
7082 if ((which == 0 && negated)
7083 || (which == 1 && ! negated))
7084 strcat (buf, "<");
7085 else
7086 strcat (buf, ">=");
7087 if (nullify && negated)
7088 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7089 else if (nullify && ! negated)
7090 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7091 else if (negated)
7092 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7093 else
7094 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7096 break;
7098 default:
7099 /* The reversed conditional branch must branch over one additional
7100 instruction if the delay slot is filled and needs to be extracted
7101 by pa_output_lbranch. If the delay slot is empty or this is a
7102 nullified forward branch, the instruction after the reversed
7103 condition branch must be nullified. */
7104 if (dbr_sequence_length () == 0
7105 || (nullify && forward_branch_p (insn)))
7107 nullify = 1;
7108 xdelay = 0;
7109 operands[4] = GEN_INT (length);
7111 else
7113 xdelay = 1;
7114 operands[4] = GEN_INT (length + 4);
7117 if (GET_MODE (operands[0]) == DImode)
7118 strcpy (buf, "bb,*");
7119 else
7120 strcpy (buf, "bb,");
7121 if ((which == 0 && negated)
7122 || (which == 1 && !negated))
7123 strcat (buf, "<");
7124 else
7125 strcat (buf, ">=");
7126 if (nullify)
7127 strcat (buf, ",n %0,%1,.+%4");
7128 else
7129 strcat (buf, " %0,%1,.+%4");
7130 output_asm_insn (buf, operands);
7131 return pa_output_lbranch (negated ? operands[3] : operands[2],
7132 insn, xdelay);
7134 return buf;
7137 /* This routine handles all the branch-on-variable-bit conditional branch
7138 sequences we might need to generate. It handles nullification of delay
7139 slots, varying length branches, negated branches and all combinations
7140 of the above. It returns the appropriate output template to emit the
7141 branch. */
7143 const char *
7144 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7145 int which)
7147 static char buf[100];
7148 bool useskip;
7149 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7150 int length = get_attr_length (insn);
7151 int xdelay;
7153 /* A conditional branch to the following instruction (e.g. the delay slot) is
7154 asking for a disaster. I do not think this can happen as this pattern
7155 is only used when optimizing; jump optimization should eliminate the
7156 jump. But be prepared just in case. */
7158 if (branch_to_delay_slot_p (insn))
7159 return "nop";
7161 /* If this is a long branch with its delay slot unfilled, set `nullify'
7162 as it can nullify the delay slot and save a nop. */
7163 if (length == 8 && dbr_sequence_length () == 0)
7164 nullify = 1;
7166 /* If this is a short forward conditional branch which did not get
7167 its delay slot filled, the delay slot can still be nullified. */
7168 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7169 nullify = forward_branch_p (insn);
7171 /* A forward branch over a single nullified insn can be done with an
7172 extrs instruction. This avoids a single cycle penalty due to
7173 mis-predicted branch if we fall through (branch not taken). */
7174 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7176 switch (length)
7179 /* All short conditional branches except backwards with an unfilled
7180 delay slot. */
7181 case 4:
7182 if (useskip)
7183 strcpy (buf, "{vextrs,|extrw,s,}");
7184 else
7185 strcpy (buf, "{bvb,|bb,}");
7186 if (useskip && GET_MODE (operands[0]) == DImode)
7187 strcpy (buf, "extrd,s,*");
7188 else if (GET_MODE (operands[0]) == DImode)
7189 strcpy (buf, "bb,*");
7190 if ((which == 0 && negated)
7191 || (which == 1 && ! negated))
7192 strcat (buf, ">=");
7193 else
7194 strcat (buf, "<");
7195 if (useskip)
7196 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7197 else if (nullify && negated)
7199 if (branch_needs_nop_p (insn))
7200 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7201 else
7202 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7204 else if (nullify && ! negated)
7206 if (branch_needs_nop_p (insn))
7207 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7208 else
7209 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7211 else if (! nullify && negated)
7212 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7213 else if (! nullify && ! negated)
7214 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7215 break;
7217 /* All long conditionals. Note a short backward branch with an
7218 unfilled delay slot is treated just like a long backward branch
7219 with an unfilled delay slot. */
7220 case 8:
7221 /* Handle weird backwards branch with a filled delay slot
7222 which is nullified. */
7223 if (dbr_sequence_length () != 0
7224 && ! forward_branch_p (insn)
7225 && nullify)
7227 strcpy (buf, "{bvb,|bb,}");
7228 if (GET_MODE (operands[0]) == DImode)
7229 strcat (buf, "*");
7230 if ((which == 0 && negated)
7231 || (which == 1 && ! negated))
7232 strcat (buf, "<");
7233 else
7234 strcat (buf, ">=");
7235 if (negated)
7236 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7237 else
7238 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7240 /* Handle short backwards branch with an unfilled delay slot.
7241 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7242 taken and untaken branches. */
7243 else if (dbr_sequence_length () == 0
7244 && ! forward_branch_p (insn)
7245 && INSN_ADDRESSES_SET_P ()
7246 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7247 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7249 strcpy (buf, "{bvb,|bb,}");
7250 if (GET_MODE (operands[0]) == DImode)
7251 strcat (buf, "*");
7252 if ((which == 0 && negated)
7253 || (which == 1 && ! negated))
7254 strcat (buf, ">=");
7255 else
7256 strcat (buf, "<");
7257 if (negated)
7258 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7259 else
7260 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7262 else
7264 strcpy (buf, "{vextrs,|extrw,s,}");
7265 if (GET_MODE (operands[0]) == DImode)
7266 strcpy (buf, "extrd,s,*");
7267 if ((which == 0 && negated)
7268 || (which == 1 && ! negated))
7269 strcat (buf, "<");
7270 else
7271 strcat (buf, ">=");
7272 if (nullify && negated)
7273 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7274 else if (nullify && ! negated)
7275 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7276 else if (negated)
7277 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7278 else
7279 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7281 break;
7283 default:
7284 /* The reversed conditional branch must branch over one additional
7285 instruction if the delay slot is filled and needs to be extracted
7286 by pa_output_lbranch. If the delay slot is empty or this is a
7287 nullified forward branch, the instruction after the reversed
7288 condition branch must be nullified. */
7289 if (dbr_sequence_length () == 0
7290 || (nullify && forward_branch_p (insn)))
7292 nullify = 1;
7293 xdelay = 0;
7294 operands[4] = GEN_INT (length);
7296 else
7298 xdelay = 1;
7299 operands[4] = GEN_INT (length + 4);
7302 if (GET_MODE (operands[0]) == DImode)
7303 strcpy (buf, "bb,*");
7304 else
7305 strcpy (buf, "{bvb,|bb,}");
7306 if ((which == 0 && negated)
7307 || (which == 1 && !negated))
7308 strcat (buf, "<");
7309 else
7310 strcat (buf, ">=");
7311 if (nullify)
7312 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7313 else
7314 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7315 output_asm_insn (buf, operands);
7316 return pa_output_lbranch (negated ? operands[3] : operands[2],
7317 insn, xdelay);
7319 return buf;
7322 /* Return the output template for emitting a dbra type insn.
7324 Note it may perform some output operations on its own before
7325 returning the final output string. */
7326 const char *
7327 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7329 int length = get_attr_length (insn);
7331 /* A conditional branch to the following instruction (e.g. the delay slot) is
7332 asking for a disaster. Be prepared! */
7334 if (branch_to_delay_slot_p (insn))
7336 if (which_alternative == 0)
7337 return "ldo %1(%0),%0";
7338 else if (which_alternative == 1)
7340 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7341 output_asm_insn ("ldw -16(%%r30),%4", operands);
7342 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7343 return "{fldws|fldw} -16(%%r30),%0";
7345 else
7347 output_asm_insn ("ldw %0,%4", operands);
7348 return "ldo %1(%4),%4\n\tstw %4,%0";
7352 if (which_alternative == 0)
7354 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7355 int xdelay;
7357 /* If this is a long branch with its delay slot unfilled, set `nullify'
7358 as it can nullify the delay slot and save a nop. */
7359 if (length == 8 && dbr_sequence_length () == 0)
7360 nullify = 1;
7362 /* If this is a short forward conditional branch which did not get
7363 its delay slot filled, the delay slot can still be nullified. */
7364 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7365 nullify = forward_branch_p (insn);
7367 switch (length)
7369 case 4:
7370 if (nullify)
7372 if (branch_needs_nop_p (insn))
7373 return "addib,%C2,n %1,%0,%3%#";
7374 else
7375 return "addib,%C2,n %1,%0,%3";
7377 else
7378 return "addib,%C2 %1,%0,%3";
7380 case 8:
7381 /* Handle weird backwards branch with a filled delay slot
7382 which is nullified. */
7383 if (dbr_sequence_length () != 0
7384 && ! forward_branch_p (insn)
7385 && nullify)
7386 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7387 /* Handle short backwards branch with an unfilled delay slot.
7388 Using an addb;nop rather than addi;bl saves 1 cycle for both
7389 taken and untaken branches. */
7390 else if (dbr_sequence_length () == 0
7391 && ! forward_branch_p (insn)
7392 && INSN_ADDRESSES_SET_P ()
7393 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7394 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7395 return "addib,%C2 %1,%0,%3%#";
7397 /* Handle normal cases. */
7398 if (nullify)
7399 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7400 else
7401 return "addi,%N2 %1,%0,%0\n\tb %3";
7403 default:
7404 /* The reversed conditional branch must branch over one additional
7405 instruction if the delay slot is filled and needs to be extracted
7406 by pa_output_lbranch. If the delay slot is empty or this is a
7407 nullified forward branch, the instruction after the reversed
7408 condition branch must be nullified. */
7409 if (dbr_sequence_length () == 0
7410 || (nullify && forward_branch_p (insn)))
7412 nullify = 1;
7413 xdelay = 0;
7414 operands[4] = GEN_INT (length);
7416 else
7418 xdelay = 1;
7419 operands[4] = GEN_INT (length + 4);
7422 if (nullify)
7423 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7424 else
7425 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7427 return pa_output_lbranch (operands[3], insn, xdelay);
7431 /* Deal with gross reload from FP register case. */
7432 else if (which_alternative == 1)
7434 /* Move loop counter from FP register to MEM then into a GR,
7435 increment the GR, store the GR into MEM, and finally reload
7436 the FP register from MEM in the branch's delay slot. */
7437 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7438 operands);
7439 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7440 if (length == 24)
7441 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7442 else if (length == 28)
7443 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7444 else
7446 operands[5] = GEN_INT (length - 16);
7447 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7448 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7449 return pa_output_lbranch (operands[3], insn, 0);
7452 /* Deal with gross reload from memory case. */
7453 else
7455 /* Reload loop counter from memory; the store back to memory
7456 happens in the branch's delay slot. */
7457 output_asm_insn ("ldw %0,%4", operands);
7458 if (length == 12)
7459 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7460 else if (length == 16)
7461 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7462 else
7464 operands[5] = GEN_INT (length - 4);
7465 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7466 return pa_output_lbranch (operands[3], insn, 0);
7471 /* Return the output template for emitting a movb type insn.
7473 Note it may perform some output operations on its own before
7474 returning the final output string. */
7475 const char *
7476 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7477 int reverse_comparison)
7479 int length = get_attr_length (insn);
7481 /* A conditional branch to the following instruction (e.g. the delay slot) is
7482 asking for a disaster. Be prepared! */
7484 if (branch_to_delay_slot_p (insn))
7486 if (which_alternative == 0)
7487 return "copy %1,%0";
7488 else if (which_alternative == 1)
7490 output_asm_insn ("stw %1,-16(%%r30)", operands);
7491 return "{fldws|fldw} -16(%%r30),%0";
7493 else if (which_alternative == 2)
7494 return "stw %1,%0";
7495 else
7496 return "mtsar %r1";
7499 /* Support the second variant. */
7500 if (reverse_comparison)
7501 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7503 if (which_alternative == 0)
7505 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7506 int xdelay;
7508 /* If this is a long branch with its delay slot unfilled, set `nullify'
7509 as it can nullify the delay slot and save a nop. */
7510 if (length == 8 && dbr_sequence_length () == 0)
7511 nullify = 1;
7513 /* If this is a short forward conditional branch which did not get
7514 its delay slot filled, the delay slot can still be nullified. */
7515 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7516 nullify = forward_branch_p (insn);
7518 switch (length)
7520 case 4:
7521 if (nullify)
7523 if (branch_needs_nop_p (insn))
7524 return "movb,%C2,n %1,%0,%3%#";
7525 else
7526 return "movb,%C2,n %1,%0,%3";
7528 else
7529 return "movb,%C2 %1,%0,%3";
7531 case 8:
7532 /* Handle weird backwards branch with a filled delay slot
7533 which is nullified. */
7534 if (dbr_sequence_length () != 0
7535 && ! forward_branch_p (insn)
7536 && nullify)
7537 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7539 /* Handle short backwards branch with an unfilled delay slot.
7540 Using a movb;nop rather than or;bl saves 1 cycle for both
7541 taken and untaken branches. */
7542 else if (dbr_sequence_length () == 0
7543 && ! forward_branch_p (insn)
7544 && INSN_ADDRESSES_SET_P ()
7545 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7546 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7547 return "movb,%C2 %1,%0,%3%#";
7548 /* Handle normal cases. */
7549 if (nullify)
7550 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7551 else
7552 return "or,%N2 %1,%%r0,%0\n\tb %3";
7554 default:
7555 /* The reversed conditional branch must branch over one additional
7556 instruction if the delay slot is filled and needs to be extracted
7557 by pa_output_lbranch. If the delay slot is empty or this is a
7558 nullified forward branch, the instruction after the reversed
7559 condition branch must be nullified. */
7560 if (dbr_sequence_length () == 0
7561 || (nullify && forward_branch_p (insn)))
7563 nullify = 1;
7564 xdelay = 0;
7565 operands[4] = GEN_INT (length);
7567 else
7569 xdelay = 1;
7570 operands[4] = GEN_INT (length + 4);
7573 if (nullify)
7574 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7575 else
7576 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7578 return pa_output_lbranch (operands[3], insn, xdelay);
7581 /* Deal with gross reload for FP destination register case. */
7582 else if (which_alternative == 1)
7584 /* Move source register to MEM, perform the branch test, then
7585 finally load the FP register from MEM in the branch's
7586 delay slot. */
7587 output_asm_insn ("stw %1,-16(%%r30)", operands);
7588 if (length == 12)
7589 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7590 else if (length == 16)
7591 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7592 else
7594 operands[4] = GEN_INT (length - 4);
7595 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7596 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7597 return pa_output_lbranch (operands[3], insn, 0);
7600 /* Deal with gross reload from memory case. */
7601 else if (which_alternative == 2)
7603 /* Reload loop counter from memory; the store back to memory
7604 happens in the branch's delay slot. */
7605 if (length == 8)
7606 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7607 else if (length == 12)
7608 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7609 else
7611 operands[4] = GEN_INT (length);
7612 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7613 operands);
7614 return pa_output_lbranch (operands[3], insn, 0);
7617 /* Handle SAR as a destination. */
7618 else
7620 if (length == 8)
7621 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7622 else if (length == 12)
7623 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7624 else
7626 operands[4] = GEN_INT (length);
7627 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7628 operands);
7629 return pa_output_lbranch (operands[3], insn, 0);
7634 /* Copy any FP arguments in INSN into integer registers. */
7635 static void
7636 copy_fp_args (rtx_insn *insn)
7638 rtx link;
7639 rtx xoperands[2];
7641 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7643 int arg_mode, regno;
7644 rtx use = XEXP (link, 0);
7646 if (! (GET_CODE (use) == USE
7647 && GET_CODE (XEXP (use, 0)) == REG
7648 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7649 continue;
7651 arg_mode = GET_MODE (XEXP (use, 0));
7652 regno = REGNO (XEXP (use, 0));
7654 /* Is it a floating point register? */
7655 if (regno >= 32 && regno <= 39)
7657 /* Copy the FP register into an integer register via memory. */
7658 if (arg_mode == SFmode)
7660 xoperands[0] = XEXP (use, 0);
7661 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7662 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7663 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7665 else
7667 xoperands[0] = XEXP (use, 0);
7668 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7669 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7670 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7671 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7677 /* Compute length of the FP argument copy sequence for INSN. */
7678 static int
7679 length_fp_args (rtx_insn *insn)
7681 int length = 0;
7682 rtx link;
7684 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7686 int arg_mode, regno;
7687 rtx use = XEXP (link, 0);
7689 if (! (GET_CODE (use) == USE
7690 && GET_CODE (XEXP (use, 0)) == REG
7691 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7692 continue;
7694 arg_mode = GET_MODE (XEXP (use, 0));
7695 regno = REGNO (XEXP (use, 0));
7697 /* Is it a floating point register? */
7698 if (regno >= 32 && regno <= 39)
7700 if (arg_mode == SFmode)
7701 length += 8;
7702 else
7703 length += 12;
7707 return length;
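/* A self-contained sketch (not from pa.c) of the byte counts used
   above, assuming each PA instruction is 4 bytes: an SFmode copy is
   two insns (fstw + ldw), and a DFmode copy is three (fstd + two
   ldws).  The helper name is hypothetical and only illustrates the
   8/12 increments in length_fp_args.  */

static int
sketch_fp_copy_length (int is_double_mode)
{
  const int pa_insn_bytes = 4;	/* fixed 32-bit instruction words */
  int ninsns = is_double_mode ? 3 : 2;
  return ninsns * pa_insn_bytes;
}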
7710 /* Return the attribute length for the millicode call instruction INSN.
7711 The length must match the code generated by pa_output_millicode_call.
7712 We include the delay slot in the returned length as it is better to
7713 overestimate the length than to underestimate it. */
7716 pa_attr_length_millicode_call (rtx_insn *insn)
7718 unsigned long distance = -1;
7719 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7721 if (INSN_ADDRESSES_SET_P ())
7723 distance = (total + insn_current_reference_address (insn));
7724 if (distance < total)
7725 distance = -1;
7728 if (TARGET_64BIT)
7730 if (!TARGET_LONG_CALLS && distance < 7600000)
7731 return 8;
7733 return 20;
7735 else if (TARGET_PORTABLE_RUNTIME)
7736 return 24;
7737 else
7739 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7740 return 8;
7742 if (!flag_pic)
7743 return 12;
7745 return 24;
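/* The distance computation above relies on unsigned wraparound: when
   TOTAL plus the reference address overflows, the sum is smaller than
   TOTAL, and the distance is forced to (unsigned long) -1 so the
   conservative long-call length is chosen.  A minimal sketch of the
   same idiom (hypothetical helper, not from pa.c):  */

static unsigned long
sketch_branch_distance (unsigned long total, unsigned long ref_address)
{
  unsigned long distance = total + ref_address;

  /* Wraparound: the true distance is unknown, so treat the target
     as out of reach of a short branch.  */
  if (distance < total)
    distance = -1;
  return distance;
}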
7749 /* INSN is a function call.
7751 CALL_DEST is the routine we are calling. */
7753 const char *
7754 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7756 int attr_length = get_attr_length (insn);
7757 int seq_length = dbr_sequence_length ();
7758 rtx xoperands[4];
7760 xoperands[0] = call_dest;
7762 /* Handle the common case where we are sure that the branch will
7763 reach the beginning of the $CODE$ subspace. The within reach
7764 form of the $$sh_func_adrs call has a length of 28. Because it
7765 has an attribute type of sh_func_adrs, it never has a nonzero
7766 sequence length (i.e., the delay slot is never filled). */
7767 if (!TARGET_LONG_CALLS
7768 && (attr_length == 8
7769 || (attr_length == 28
7770 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7772 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7773 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7775 else
7777 if (TARGET_64BIT)
7779 /* It might seem that one insn could be saved by accessing
7780 the millicode function using the linkage table. However,
7781 this doesn't work in shared libraries and other dynamically
7782 loaded objects. Using a pc-relative sequence also avoids
7783 problems related to the implicit use of the gp register. */
7784 xoperands[1] = gen_rtx_REG (Pmode, 1);
7785 xoperands[2] = xoperands[1];
7786 pa_output_pic_pcrel_sequence (xoperands);
7787 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7789 else if (TARGET_PORTABLE_RUNTIME)
7791 /* Pure portable runtime doesn't allow be/ble; we also don't
7792 have PIC support in the assembler/linker, so this sequence
7793 is needed. */
7795 /* Get the address of our target into %r1. */
7796 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7797 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7799 /* Get our return address into %r31. */
7800 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7801 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7803 /* Jump to our target address in %r1. */
7804 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7806 else if (!flag_pic)
7808 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7809 if (TARGET_PA_20)
7810 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7811 else
7812 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7814 else
7816 xoperands[1] = gen_rtx_REG (Pmode, 31);
7817 xoperands[2] = gen_rtx_REG (Pmode, 1);
7818 pa_output_pic_pcrel_sequence (xoperands);
7820 /* Adjust return address. */
7821 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7823 /* Jump to our target address in %r1. */
7824 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7828 if (seq_length == 0)
7829 output_asm_insn ("nop", xoperands);
7831 return "";
7834 /* Return the attribute length of the call instruction INSN. The SIBCALL
7835 flag indicates whether INSN is a regular call or a sibling call. The
7836 length returned must be longer than the code actually generated by
7837 pa_output_call. Since branch shortening is done before delay branch
7838 sequencing, there is no way to determine whether or not the delay
7839 slot will be filled during branch shortening. Even when the delay
7840 slot is filled, we may have to add a nop if the delay slot contains
7841 a branch that can't reach its target. Thus, we always have to include
7842 the delay slot in the length estimate. This used to be done in
7843 pa_adjust_insn_length but we do it here now as some sequences always
7844 fill the delay slot and we can save four bytes in the estimate for
7845 these sequences. */
7848 pa_attr_length_call (rtx_insn *insn, int sibcall)
7850 int local_call;
7851 rtx call, call_dest;
7852 tree call_decl;
7853 int length = 0;
7854 rtx pat = PATTERN (insn);
7855 unsigned long distance = -1;
7857 gcc_assert (CALL_P (insn));
7859 if (INSN_ADDRESSES_SET_P ())
7861 unsigned long total;
7863 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7864 distance = (total + insn_current_reference_address (insn));
7865 if (distance < total)
7866 distance = -1;
7869 gcc_assert (GET_CODE (pat) == PARALLEL);
7871 /* Get the call rtx. */
7872 call = XVECEXP (pat, 0, 0);
7873 if (GET_CODE (call) == SET)
7874 call = SET_SRC (call);
7876 gcc_assert (GET_CODE (call) == CALL);
7878 /* Determine if this is a local call. */
7879 call_dest = XEXP (XEXP (call, 0), 0);
7880 call_decl = SYMBOL_REF_DECL (call_dest);
7881 local_call = call_decl && targetm.binds_local_p (call_decl);
7883 /* pc-relative branch. */
7884 if (!TARGET_LONG_CALLS
7885 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7886 || distance < MAX_PCREL17F_OFFSET))
7887 length += 8;
7889 /* 64-bit plabel sequence. */
7890 else if (TARGET_64BIT && !local_call)
7891 length += sibcall ? 28 : 24;
7893 /* non-pic long absolute branch sequence. */
7894 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7895 length += 12;
7897 /* long pc-relative branch sequence. */
7898 else if (TARGET_LONG_PIC_SDIFF_CALL
7899 || (TARGET_GAS && !TARGET_SOM && local_call))
7901 length += 20;
7903 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7904 length += 8;
7907 /* 32-bit plabel sequence. */
7908 else
7910 length += 32;
7912 if (TARGET_SOM)
7913 length += length_fp_args (insn);
7915 if (flag_pic)
7916 length += 4;
7918 if (!TARGET_PA_20)
7920 if (!sibcall)
7921 length += 8;
7923 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7924 length += 8;
7928 return length;
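/* A worked instance of the 32-bit plabel branch above, with the
   target predicates abstracted into flags (a sketch, not the exact
   pa.c conditions): SOM PIC code on PA 1.x, a non-sibling call using
   space registers, and no FP arguments gives 32 + 0 + 4 + 8 + 8 = 52
   bytes.  */

static int
sketch_plabel_call_length (int fp_args_len, int pic, int pa20,
			   int sibcall, int uses_space_regs)
{
  int length = 32 + fp_args_len;

  if (pic)
    length += 4;
  if (!pa20)
    {
      if (!sibcall)
	length += 8;
      if (uses_space_regs)
	length += 8;
    }
  return length;
}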
7931 /* INSN is a function call.
7933 CALL_DEST is the routine we are calling. */
7935 const char *
7936 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7938 int seq_length = dbr_sequence_length ();
7939 tree call_decl = SYMBOL_REF_DECL (call_dest);
7940 int local_call = call_decl && targetm.binds_local_p (call_decl);
7941 rtx xoperands[4];
7943 xoperands[0] = call_dest;
7945 /* Handle the common case where we're sure that the branch will reach
7946 the beginning of the "$CODE$" subspace. This is the beginning of
7947 the current function if we are in a named section. */
7948 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7950 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7951 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7953 else
7955 if (TARGET_64BIT && !local_call)
7957 /* ??? As far as I can tell, the HP linker doesn't support the
7958 long pc-relative sequence described in the 64-bit runtime
7959 architecture. So, we use a slightly longer indirect call. */
7960 xoperands[0] = pa_get_deferred_plabel (call_dest);
7961 xoperands[1] = gen_label_rtx ();
7963 /* If this isn't a sibcall, we put the load of %r27 into the
7964 delay slot. We can't do this in a sibcall as we don't
7965 have a second call-clobbered scratch register available.
7966 We don't need to do anything when generating fast indirect
7967 calls. */
7968 if (seq_length != 0 && !sibcall)
7970 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7971 optimize, 0, NULL);
7973 /* Now delete the delay insn. */
7974 SET_INSN_DELETED (NEXT_INSN (insn));
7975 seq_length = 0;
7978 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7979 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7980 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7982 if (sibcall)
7984 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7985 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7986 output_asm_insn ("bve (%%r1)", xoperands);
7988 else
7990 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7991 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7992 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7993 seq_length = 1;
7996 else
7998 int indirect_call = 0;
8000 /* Emit a long call. There are several different sequences
8001 of increasing length and complexity. In most cases,
8002 they don't allow an instruction in the delay slot. */
8003 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8004 && !TARGET_LONG_PIC_SDIFF_CALL
8005 && !(TARGET_GAS && !TARGET_SOM && local_call)
8006 && !TARGET_64BIT)
8007 indirect_call = 1;
8009 if (seq_length != 0
8010 && !sibcall
8011 && (!TARGET_PA_20
8012 || indirect_call
8013 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
8015 /* A non-jump insn in the delay slot. By definition we can
8016 emit this insn before the call (and in fact before argument
8017 relocation). */
8018 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
8019 NULL);
8021 /* Now delete the delay insn. */
8022 SET_INSN_DELETED (NEXT_INSN (insn));
8023 seq_length = 0;
8026 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
8028 /* This is the best sequence for making long calls in
8029 non-pic code. Unfortunately, GNU ld doesn't provide
8030 the stub needed for external calls, and GAS's support
8031 for this with the SOM linker is buggy. It is safe
8032 to use this for local calls. */
8033 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8034 if (sibcall)
8035 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
8036 else
8038 if (TARGET_PA_20)
8039 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
8040 xoperands);
8041 else
8042 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
8044 output_asm_insn ("copy %%r31,%%r2", xoperands);
8045 seq_length = 1;
8048 else
8050 /* The HP assembler and linker can handle relocations for
8051 the difference of two symbols. The HP assembler
8052 recognizes the sequence as a pc-relative call and
8053 the linker provides stubs when needed. */
8055 /* GAS currently can't generate the relocations that
8056 are needed for the SOM linker under HP-UX using this
8057 sequence. The GNU linker doesn't generate the stubs
8058 that are needed for external calls on TARGET_ELF32
8059 with this sequence. For now, we have to use a longer
8060 plabel sequence when using GAS for non local calls. */
8061 if (TARGET_LONG_PIC_SDIFF_CALL
8062 || (TARGET_GAS && !TARGET_SOM && local_call))
8064 xoperands[1] = gen_rtx_REG (Pmode, 1);
8065 xoperands[2] = xoperands[1];
8066 pa_output_pic_pcrel_sequence (xoperands);
8068 else
8070 /* Emit a long plabel-based call sequence. This is
8071 essentially an inline implementation of $$dyncall.
8072 We don't actually try to call $$dyncall as this is
8073 as difficult as calling the function itself. */
8074 xoperands[0] = pa_get_deferred_plabel (call_dest);
8075 xoperands[1] = gen_label_rtx ();
8077 /* Since the call is indirect, FP arguments in registers
8078 need to be copied to the general registers. Then, the
8079 argument relocation stub will copy them back. */
8080 if (TARGET_SOM)
8081 copy_fp_args (insn);
8083 if (flag_pic)
8085 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8086 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8087 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8089 else
8091 output_asm_insn ("addil LR'%0-$global$,%%r27",
8092 xoperands);
8093 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8094 xoperands);
8097 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8098 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8099 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8100 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8102 if (!sibcall && !TARGET_PA_20)
8104 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8105 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8106 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8107 else
8108 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8112 if (TARGET_PA_20)
8114 if (sibcall)
8115 output_asm_insn ("bve (%%r1)", xoperands);
8116 else
8118 if (indirect_call)
8120 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8121 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8122 seq_length = 1;
8124 else
8125 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8128 else
8130 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8131 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8132 xoperands);
8134 if (sibcall)
8136 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8137 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8138 else
8139 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8141 else
8143 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8144 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8145 else
8146 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8148 if (indirect_call)
8149 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8150 else
8151 output_asm_insn ("copy %%r31,%%r2", xoperands);
8152 seq_length = 1;
8159 if (seq_length == 0)
8160 output_asm_insn ("nop", xoperands);
8162 return "";
8165 /* Return the attribute length of the indirect call instruction INSN.
8166 The length must match the code generated by pa_output_indirect_call.
8167 The returned length includes the delay slot. Currently, the delay
8168 slot of an indirect call sequence is not exposed and it is used by
8169 the sequence itself. */
8172 pa_attr_length_indirect_call (rtx_insn *insn)
8174 unsigned long distance = -1;
8175 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8177 if (INSN_ADDRESSES_SET_P ())
8179 distance = (total + insn_current_reference_address (insn));
8180 if (distance < total)
8181 distance = -1;
8184 if (TARGET_64BIT)
8185 return 12;
8187 if (TARGET_FAST_INDIRECT_CALLS)
8188 return 8;
8190 if (TARGET_PORTABLE_RUNTIME)
8191 return 16;
8193 /* Inline version of $$dyncall. */
8194 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8195 return 20;
8197 if (!TARGET_LONG_CALLS
8198 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8199 || distance < MAX_PCREL17F_OFFSET))
8200 return 8;
8202 /* Out of reach, can use ble. */
8203 if (!flag_pic)
8204 return 12;
8206 /* Inline version of $$dyncall. */
8207 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8208 return 20;
8210 if (!optimize_size)
8211 return 36;
8213 /* Long PIC pc-relative call. */
8214 return 20;
8217 const char *
8218 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8220 rtx xoperands[4];
8221 int length;
8223 if (TARGET_64BIT)
8225 xoperands[0] = call_dest;
8226 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8227 "bve,l (%%r2),%%r2\n\t"
8228 "ldd 24(%0),%%r27", xoperands);
8229 return "";
8232 /* First the special case for kernels, level 0 systems, etc. */
8233 if (TARGET_FAST_INDIRECT_CALLS)
8235 pa_output_arg_descriptor (insn);
8236 if (TARGET_PA_20)
8237 return "bve,l,n (%%r22),%%r2\n\tnop";
8238 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8241 if (TARGET_PORTABLE_RUNTIME)
8243 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8244 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8245 pa_output_arg_descriptor (insn);
8246 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8249 /* Maybe emit a fast inline version of $$dyncall. */
8250 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8252 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8253 "ldw 2(%%r22),%%r19\n\t"
8254 "ldw -2(%%r22),%%r22", xoperands);
8255 pa_output_arg_descriptor (insn);
8256 if (TARGET_NO_SPACE_REGS)
8258 if (TARGET_PA_20)
8259 return "bve,l,n (%%r22),%%r2\n\tnop";
8260 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8262 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8265 /* Now the normal case -- we can reach $$dyncall directly or
8266 we're sure that we can get there via a long-branch stub.
8268 No need to check target flags as the length uniquely identifies
8269 the remaining cases. */
8270 length = pa_attr_length_indirect_call (insn);
8271 if (length == 8)
8273 pa_output_arg_descriptor (insn);
8275 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8276 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8277 variant of the B,L instruction can't be used on the SOM target. */
8278 if (TARGET_PA_20 && !TARGET_SOM)
8279 return "b,l,n $$dyncall,%%r2\n\tnop";
8280 else
8281 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8284 /* Long millicode call, but we are not generating PIC or portable runtime
8285 code. */
8286 if (length == 12)
8288 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8289 pa_output_arg_descriptor (insn);
8290 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8293 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8294 pc-relative call sequence is five instructions. The inline PA 2.0
8295 version of $$dyncall is also five instructions. The PA 1.X versions
8296 are longer but still an overall win. */
8297 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8299 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8300 "ldw 2(%%r22),%%r19\n\t"
8301 "ldw -2(%%r22),%%r22", xoperands);
8302 if (TARGET_NO_SPACE_REGS)
8304 pa_output_arg_descriptor (insn);
8305 if (TARGET_PA_20)
8306 return "bve,l,n (%%r22),%%r2\n\tnop";
8307 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8309 if (TARGET_PA_20)
8311 pa_output_arg_descriptor (insn);
8312 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8314 output_asm_insn ("bl .+8,%%r2\n\t"
8315 "ldo 16(%%r2),%%r2\n\t"
8316 "ldsid (%%r22),%%r1\n\t"
8317 "mtsp %%r1,%%sr0", xoperands);
8318 pa_output_arg_descriptor (insn);
8319 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8322 /* We need a long PIC call to $$dyncall. */
8323 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8324 xoperands[1] = gen_rtx_REG (Pmode, 2);
8325 xoperands[2] = gen_rtx_REG (Pmode, 1);
8326 pa_output_pic_pcrel_sequence (xoperands);
8327 pa_output_arg_descriptor (insn);
8328 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8331 /* In HPUX 8.0's shared library scheme, special relocations are needed
8332 for function labels if they might be passed to a function
8333 in a shared library (because shared libraries don't live in code
8334 space), and special magic is needed to construct their address. */
8336 void
8337 pa_encode_label (rtx sym)
8339 const char *str = XSTR (sym, 0);
8340 int len = strlen (str) + 1;
8341 char *newstr, *p;
8343 p = newstr = XALLOCAVEC (char, len + 1);
8344 *p++ = '@';
8345 strcpy (p, str);
8347 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8350 static void
8351 pa_encode_section_info (tree decl, rtx rtl, int first)
8353 int old_referenced = 0;
8355 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8356 old_referenced
8357 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8359 default_encode_section_info (decl, rtl, first);
8361 if (first && TEXT_SPACE_P (decl))
8363 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8364 if (TREE_CODE (decl) == FUNCTION_DECL)
8365 pa_encode_label (XEXP (rtl, 0));
8367 else if (old_referenced)
8368 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8371 /* This is sort of the inverse of pa_encode_section_info. */
8373 static const char *
8374 pa_strip_name_encoding (const char *str)
8376 str += (*str == '@');
8377 str += (*str == '*');
8378 return str;
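/* A short round trip of the label encoding, assuming only the '@'
   marker added by pa_encode_label and the generic '*' user-label
   prefix handled above (standalone sketch, not from pa.c):
   encoding "foo" yields "@foo", and stripping "@foo" or "@*foo"
   yields "foo" again.  */

static const char *
sketch_strip_label (const char *str)
{
  str += (*str == '@');		/* function label marker */
  str += (*str == '*');		/* user label prefix marker */
  return str;
}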
8381 /* Returns 1 if OP is a function label involved in a simple addition
8382 with a constant. Used to keep certain patterns from matching
8383 during instruction combination. */
8385 pa_is_function_label_plus_const (rtx op)
8387 /* Strip off any CONST. */
8388 if (GET_CODE (op) == CONST)
8389 op = XEXP (op, 0);
8391 return (GET_CODE (op) == PLUS
8392 && function_label_operand (XEXP (op, 0), VOIDmode)
8393 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8396 /* Output assembly code for a thunk to FUNCTION. */
8398 static void
8399 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8400 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8401 tree function)
8403 static unsigned int current_thunk_number;
8404 int val_14 = VAL_14_BITS_P (delta);
8405 unsigned int old_last_address = last_address, nbytes = 0;
8406 char label[17];
8407 rtx xoperands[4];
8409 xoperands[0] = XEXP (DECL_RTL (function), 0);
8410 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8411 xoperands[2] = GEN_INT (delta);
8413 final_start_function (emit_barrier (), file, 1);
8415 /* Output the thunk. We know that the function is in the same
8416 translation unit (i.e., the same space) as the thunk, and that
8417 thunks are output after their method. Thus, we don't need an
8418 external branch to reach the function. With SOM and GAS,
8419 functions and thunks are effectively in different sections.
8420 Thus, we can always use an IA-relative branch and the linker
8421 will add a long branch stub if necessary.
8423 However, we have to be careful when generating PIC code on the
8424 SOM port to ensure that the sequence does not transfer to an
8425 import stub for the target function as this could clobber the
8426 return value saved at SP-24. This would also apply to the
8427 32-bit linux port if the multi-space model is implemented. */
8428 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8429 && !(flag_pic && TREE_PUBLIC (function))
8430 && (TARGET_GAS || last_address < 262132))
8431 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8432 && ((targetm_common.have_named_sections
8433 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8434 /* The GNU 64-bit linker has rather poor stub management.
8435 So, we use a long branch from thunks that aren't in
8436 the same section as the target function. */
8437 && ((!TARGET_64BIT
8438 && (DECL_SECTION_NAME (thunk_fndecl)
8439 != DECL_SECTION_NAME (function)))
8440 || ((DECL_SECTION_NAME (thunk_fndecl)
8441 == DECL_SECTION_NAME (function))
8442 && last_address < 262132)))
8443 /* In this case, we need to be able to reach the start of
8444 the stub table even though the function is likely closer
8445 and can be jumped to directly. */
8446 || (targetm_common.have_named_sections
8447 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8448 && DECL_SECTION_NAME (function) == NULL
8449 && total_code_bytes < MAX_PCREL17F_OFFSET)
8450 /* Likewise. */
8451 || (!targetm_common.have_named_sections
8452 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8454 if (!val_14)
8455 output_asm_insn ("addil L'%2,%%r26", xoperands);
8457 output_asm_insn ("b %0", xoperands);
8459 if (val_14)
8461 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8462 nbytes += 8;
8464 else
8466 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8467 nbytes += 12;
8470 else if (TARGET_64BIT)
8472 rtx xop[4];
8474 /* We only have one call-clobbered scratch register, so we can't
8475 make use of the delay slot if delta doesn't fit in 14 bits. */
8476 if (!val_14)
8478 output_asm_insn ("addil L'%2,%%r26", xoperands);
8479 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8482 /* Load function address into %r1. */
8483 xop[0] = xoperands[0];
8484 xop[1] = gen_rtx_REG (Pmode, 1);
8485 xop[2] = xop[1];
8486 pa_output_pic_pcrel_sequence (xop);
8488 if (val_14)
8490 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8491 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8492 nbytes += 20;
8494 else
8496 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8497 nbytes += 24;
8500 else if (TARGET_PORTABLE_RUNTIME)
8502 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8503 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8505 if (!val_14)
8506 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8508 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8510 if (val_14)
8512 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8513 nbytes += 16;
8515 else
8517 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8518 nbytes += 20;
8521 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8523 /* The function is accessible from outside this module. The only
8524 way to avoid an import stub between the thunk and function is to
8525 call the function directly with an indirect sequence similar to
8526 that used by $$dyncall. This is possible because $$dyncall acts
8527 as the import stub in an indirect call. */
8528 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8529 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8530 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8531 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8532 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8533 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8534 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8535 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8536 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8538 if (!val_14)
8540 output_asm_insn ("addil L'%2,%%r26", xoperands);
8541 nbytes += 4;
8544 if (TARGET_PA_20)
8546 output_asm_insn ("bve (%%r22)", xoperands);
8547 nbytes += 36;
8549 else if (TARGET_NO_SPACE_REGS)
8551 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8552 nbytes += 36;
8554 else
8556 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8557 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8558 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8559 nbytes += 44;
8562 if (val_14)
8563 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8564 else
8565 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8567 else if (flag_pic)
8569 rtx xop[4];
8571 /* Load function address into %r22. */
8572 xop[0] = xoperands[0];
8573 xop[1] = gen_rtx_REG (Pmode, 1);
8574 xop[2] = gen_rtx_REG (Pmode, 22);
8575 pa_output_pic_pcrel_sequence (xop);
8577 if (!val_14)
8578 output_asm_insn ("addil L'%2,%%r26", xoperands);
8580 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8582 if (val_14)
8584 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8585 nbytes += 20;
8587 else
8589 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8590 nbytes += 24;
8593 else
8595 if (!val_14)
8596 output_asm_insn ("addil L'%2,%%r26", xoperands);
8598 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8599 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8601 if (val_14)
8603 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8604 nbytes += 12;
8606 else
8608 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8609 nbytes += 16;
8613 final_end_function ();
8615 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8617 switch_to_section (data_section);
8618 output_asm_insn (".align 4", xoperands);
8619 ASM_OUTPUT_LABEL (file, label);
8620 output_asm_insn (".word P'%0", xoperands);
8623 current_thunk_number++;
8624 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8625 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8626 last_address += nbytes;
8627 if (old_last_address > last_address)
8628 last_address = UINT_MAX;
8629 update_total_code_bytes (nbytes);
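/* The thunk bookkeeping above rounds NBYTES up to the function
   boundary with the usual power-of-two mask idiom.  A standalone
   sketch of that rounding (hypothetical helper):  */

static unsigned int
sketch_round_to_boundary (unsigned int nbytes, unsigned int boundary_bytes)
{
  /* E.g. sketch_round_to_boundary (36, 8) == 40.  */
  return (nbytes + boundary_bytes - 1) & ~(boundary_bytes - 1);
}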
8632 /* Only direct calls to static functions are allowed to be sibling (tail)
8633 call optimized.
8635 This restriction is necessary because some linker generated stubs will
8636 store return pointers into rp' in some cases, which might clobber a
8637 live value already in rp'.
8639 In a sibcall the current function and the target function share stack
8640 space. Thus if the path to the current function and the path to the
8641 target function save a value in rp', they save the value into the
8642 same stack slot, which has undesirable consequences.
8644 Because of the deferred binding nature of shared libraries any function
8645 with external scope could be in a different load module and thus require
8646 rp' to be saved when calling that function. So sibcall optimizations
8647 can only be safe for static functions.
8649 Note that GCC never needs return value relocations, so we don't have to
8650 worry about static calls with return value relocations (which require
8651 saving rp').
8653 It is safe to perform a sibcall optimization when the target function
8654 will never return. */
8655 static bool
8656 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8658 if (TARGET_PORTABLE_RUNTIME)
8659 return false;
8661 /* Sibcalls are not ok because the arg pointer register is not a fixed
8662 register. This prevents the sibcall optimization from occurring. In
8663 addition, there are problems with stub placement using GNU ld. This
8664 is because a normal sibcall branch uses a 17-bit relocation while
8665 a regular call branch uses a 22-bit relocation. As a result, more
8666 care needs to be taken in the placement of long-branch stubs. */
8667 if (TARGET_64BIT)
8668 return false;
8670 /* Sibcalls are only ok within a translation unit. */
8671 return (decl && !TREE_PUBLIC (decl));
8674 /* ??? Addition is not commutative on the PA due to the weird implicit
8675 space register selection rules for memory addresses. Therefore, we
8676 don't consider a + b == b + a, as this might be inside a MEM. */
8677 static bool
8678 pa_commutative_p (const_rtx x, int outer_code)
8680 return (COMMUTATIVE_P (x)
8681 && (TARGET_NO_SPACE_REGS
8682 || (outer_code != UNKNOWN && outer_code != MEM)
8683 || GET_CODE (x) != PLUS));
8686 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8687 use in fmpyadd instructions. */
8689 pa_fmpyaddoperands (rtx *operands)
8691 machine_mode mode = GET_MODE (operands[0]);
8693 /* Must be a floating point mode. */
8694 if (mode != SFmode && mode != DFmode)
8695 return 0;
8697 /* All modes must be the same. */
8698 if (! (mode == GET_MODE (operands[1])
8699 && mode == GET_MODE (operands[2])
8700 && mode == GET_MODE (operands[3])
8701 && mode == GET_MODE (operands[4])
8702 && mode == GET_MODE (operands[5])))
8703 return 0;
8705 /* All operands must be registers. */
8706 if (! (GET_CODE (operands[1]) == REG
8707 && GET_CODE (operands[2]) == REG
8708 && GET_CODE (operands[3]) == REG
8709 && GET_CODE (operands[4]) == REG
8710 && GET_CODE (operands[5]) == REG))
8711 return 0;
8713 /* Only 2 real operands to the addition. One of the input operands must
8714 be the same as the output operand. */
8715 if (! rtx_equal_p (operands[3], operands[4])
8716 && ! rtx_equal_p (operands[3], operands[5]))
8717 return 0;
8719 /* Inout operand of add cannot conflict with any operands from multiply. */
8720 if (rtx_equal_p (operands[3], operands[0])
8721 || rtx_equal_p (operands[3], operands[1])
8722 || rtx_equal_p (operands[3], operands[2]))
8723 return 0;
8725 /* The multiply cannot feed into the addition operands. */
8726 if (rtx_equal_p (operands[4], operands[0])
8727 || rtx_equal_p (operands[5], operands[0]))
8728 return 0;
8730 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8731 if (mode == SFmode
8732 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8733 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8734 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8735 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8736 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8737 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8738 return 0;
8740 /* Passed. Operands are suitable for fmpyadd. */
8741 return 1;
8744 #if !defined(USE_COLLECT2)
8745 static void
8746 pa_asm_out_constructor (rtx symbol, int priority)
8748 if (!function_label_operand (symbol, VOIDmode))
8749 pa_encode_label (symbol);
8751 #ifdef CTORS_SECTION_ASM_OP
8752 default_ctor_section_asm_out_constructor (symbol, priority);
8753 #else
8754 # ifdef TARGET_ASM_NAMED_SECTION
8755 default_named_section_asm_out_constructor (symbol, priority);
8756 # else
8757 default_stabs_asm_out_constructor (symbol, priority);
8758 # endif
8759 #endif
8762 static void
8763 pa_asm_out_destructor (rtx symbol, int priority)
8765 if (!function_label_operand (symbol, VOIDmode))
8766 pa_encode_label (symbol);
8768 #ifdef DTORS_SECTION_ASM_OP
8769 default_dtor_section_asm_out_destructor (symbol, priority);
8770 #else
8771 # ifdef TARGET_ASM_NAMED_SECTION
8772 default_named_section_asm_out_destructor (symbol, priority);
8773 # else
8774 default_stabs_asm_out_destructor (symbol, priority);
8775 # endif
8776 #endif
8778 #endif
8780 /* This function places uninitialized global data in the bss section.
8781 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8782 function on the SOM port to prevent uninitialized global data from
8783 being placed in the data section. */
8785 void
8786 pa_asm_output_aligned_bss (FILE *stream,
8787 const char *name,
8788 unsigned HOST_WIDE_INT size,
8789 unsigned int align)
8791 switch_to_section (bss_section);
8792 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8794 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8795 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8796 #endif
8798 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8799 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8800 #endif
8802 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8803 ASM_OUTPUT_LABEL (stream, name);
8804 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8807 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8808 that doesn't allow the alignment of global common storage to be directly
8809 specified. The SOM linker aligns common storage based on the rounded
8810 value of the NUM_BYTES parameter in the .comm directive. It's not
8811 possible to use the .align directive as it doesn't affect the alignment
8812 of the label associated with a .comm directive. */
8814 void
8815 pa_asm_output_aligned_common (FILE *stream,
8816 const char *name,
8817 unsigned HOST_WIDE_INT size,
8818 unsigned int align)
8820 unsigned int max_common_align;
8822 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8823 if (align > max_common_align)
8825 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8826 "for global common data. Using %u",
8827 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8828 align = max_common_align;
8831 switch_to_section (bss_section);
8833 assemble_name (stream, name);
8834 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8835 MAX (size, align / BITS_PER_UNIT));
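/* Because the SOM linker derives the alignment of common storage from
   the rounded .comm size, the code above inflates the emitted size to
   at least the requested alignment.  A sketch of the emitted value,
   assuming BITS_PER_UNIT is 8 (hypothetical helper):  */

static unsigned long
sketch_comm_size (unsigned long size, unsigned int align_in_bits)
{
  unsigned long align_bytes = align_in_bits / 8;

  /* A 2-byte object requesting 64-bit alignment is emitted as
     ".comm name,8".  */
  return size > align_bytes ? size : align_bytes;
}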
8838 /* We can't use .comm for local common storage as the SOM linker effectively
8839 treats the symbol as universal and uses the same storage for local symbols
8840 with the same name in different object files. The .block directive
8841 reserves an uninitialized block of storage. However, it's not common
8842 storage. Fortunately, GCC never requests common storage with the same
8843 name in any given translation unit. */
8845 void
8846 pa_asm_output_aligned_local (FILE *stream,
8847 const char *name,
8848 unsigned HOST_WIDE_INT size,
8849 unsigned int align)
8851 switch_to_section (bss_section);
8852 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8854 #ifdef LOCAL_ASM_OP
8855 fprintf (stream, "%s", LOCAL_ASM_OP);
8856 assemble_name (stream, name);
8857 fprintf (stream, "\n");
8858 #endif
8860 ASM_OUTPUT_LABEL (stream, name);
8861 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8864 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8865 use in fmpysub instructions. */
8867 pa_fmpysuboperands (rtx *operands)
8869 machine_mode mode = GET_MODE (operands[0]);
8871 /* Must be a floating point mode. */
8872 if (mode != SFmode && mode != DFmode)
8873 return 0;
8875 /* All modes must be the same. */
8876 if (! (mode == GET_MODE (operands[1])
8877 && mode == GET_MODE (operands[2])
8878 && mode == GET_MODE (operands[3])
8879 && mode == GET_MODE (operands[4])
8880 && mode == GET_MODE (operands[5])))
8881 return 0;
8883 /* All operands must be registers. */
8884 if (! (GET_CODE (operands[1]) == REG
8885 && GET_CODE (operands[2]) == REG
8886 && GET_CODE (operands[3]) == REG
8887 && GET_CODE (operands[4]) == REG
8888 && GET_CODE (operands[5]) == REG))
8889 return 0;
8891 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8892 operation, so operands[4] must be the same as operands[3]. */
8893 if (! rtx_equal_p (operands[3], operands[4]))
8894 return 0;
8896 /* The multiply cannot feed into the subtraction. */
8897 if (rtx_equal_p (operands[5], operands[0]))
8898 return 0;
8900 /* Inout operand of sub cannot conflict with any operands from multiply. */
8901 if (rtx_equal_p (operands[3], operands[0])
8902 || rtx_equal_p (operands[3], operands[1])
8903 || rtx_equal_p (operands[3], operands[2]))
8904 return 0;
8906 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8907 if (mode == SFmode
8908 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8909 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8910 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8911 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8912 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8913 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8914 return 0;
8916 /* Passed. Operands are suitable for fmpysub. */
8917 return 1;
8920 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8921 constants for a MULT embedded inside a memory address. */
8923 pa_mem_shadd_constant_p (int val)
8925 if (val == 2 || val == 4 || val == 8)
8926 return 1;
8927 else
8928 return 0;
8931 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8932 constants for shadd instructions. */
8934 pa_shadd_constant_p (int val)
8936 if (val == 1 || val == 2 || val == 3)
8937 return 1;
8938 else
8939 return 0;
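/* The two predicates above are related by a power of two: a MULT by
   2, 4 or 8 inside an address corresponds to a shadd shift count of
   1, 2 or 3 (2 == 1 << 1, and so on).  A sketch of the mapping
   (hypothetical helper, not from pa.c):  */

static int
sketch_mem_shadd_to_shift (int val)
{
  switch (val)
    {
    case 2: return 1;
    case 4: return 2;
    case 8: return 3;
    default: return -1;	/* not a valid shadd scale factor */
    }
}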
8942 /* Return TRUE if INSN branches forward. */
8944 static bool
8945 forward_branch_p (rtx_insn *insn)
8947 rtx lab = JUMP_LABEL (insn);
8949 /* The INSN must have a jump label. */
8950 gcc_assert (lab != NULL_RTX);
8952 if (INSN_ADDRESSES_SET_P ())
8953 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8955 while (insn)
8957 if (insn == lab)
8958 return true;
8959 else
8960 insn = NEXT_INSN (insn);
8963 return false;
8966 /* Output an unconditional move and branch insn. */
8968 const char *
8969 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8971 int length = get_attr_length (insn);
8973 /* These are the cases in which we win. */
8974 if (length == 4)
8975 return "mov%I1b,tr %1,%0,%2";
8977 /* None of the following cases win, but they don't lose either. */
8978 if (length == 8)
8980 if (dbr_sequence_length () == 0)
8982 /* Nothing in the delay slot, fake it by putting the combined
8983 insn (the copy or add) in the delay slot of a bl. */
8984 if (GET_CODE (operands[1]) == CONST_INT)
8985 return "b %2\n\tldi %1,%0";
8986 else
8987 return "b %2\n\tcopy %1,%0";
8989 else
8991 /* Something in the delay slot, but we've got a long branch. */
8992 if (GET_CODE (operands[1]) == CONST_INT)
8993 return "ldi %1,%0\n\tb %2";
8994 else
8995 return "copy %1,%0\n\tb %2";
8999 if (GET_CODE (operands[1]) == CONST_INT)
9000 output_asm_insn ("ldi %1,%0", operands);
9001 else
9002 output_asm_insn ("copy %1,%0", operands);
9003 return pa_output_lbranch (operands[2], insn, 1);
9006 /* Output an unconditional add and branch insn. */
9008 const char *
9009 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
9011 int length = get_attr_length (insn);
9013 /* To make life easy we want operand0 to be the shared input/output
9014 operand and operand1 to be the readonly operand. */
9015 if (operands[0] == operands[1])
9016 operands[1] = operands[2];
9018 /* These are the cases in which we win. */
9019 if (length == 4)
9020 return "add%I1b,tr %1,%0,%3";
9022 /* None of the following cases win, but they don't lose either. */
9023 if (length == 8)
9025 if (dbr_sequence_length () == 0)
9026 /* Nothing in the delay slot, fake it by putting the combined
9027 insn (the copy or add) in the delay slot of a bl. */
9028 return "b %3\n\tadd%I1 %1,%0,%0";
9029 else
9030 /* Something in the delay slot, but we've got a long branch. */
9031 return "add%I1 %1,%0,%0\n\tb %3";
9034 output_asm_insn ("add%I1 %1,%0,%0", operands);
9035 return pa_output_lbranch (operands[3], insn, 1);
9038 /* We use this hook to perform a PA specific optimization which is difficult
9039 to do in earlier passes. */
9041 static void
9042 pa_reorg (void)
9044 remove_useless_addtr_insns (1);
9046 if (pa_cpu < PROCESSOR_8000)
9047 pa_combine_instructions ();
9050 /* The PA has a number of odd instructions which can perform multiple
9051 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9052 it may be profitable to combine two instructions into one instruction
9053 with two outputs. It's not profitable on PA2.0 machines because the
9054 two outputs would take two slots in the reorder buffers.
9056 This routine finds instructions which can be combined and combines
9057 them. We only support some of the potential combinations, and we
9058 only try common ways to find suitable instructions.
9060 * addb can add two registers or a register and a small integer
9061 and jump to a nearby (+-8k) location. Normally the jump to the
9062 nearby location is conditional on the result of the add, but by
9063 using the "true" condition we can make the jump unconditional.
9064 Thus addb can perform two independent operations in one insn.
9066 * movb is similar to addb in that it can perform a reg->reg
9067 or small immediate->reg copy and jump to a nearby (+-8k) location.
9069 * fmpyadd and fmpysub can perform a FP multiply and either an
9070 FP add or FP sub if the operands of the multiply and add/sub are
9071 independent (there are other minor restrictions). Note both
9072 the fmpy and fadd/fsub can in theory move to better spots according
9073 to data dependencies, but for now we require the fmpy stay at a
9074 fixed location.
9076 * Many of the memory operations can perform pre & post updates
9077 of index registers. GCC's pre/post increment/decrement addressing
9078 is far too simple to take advantage of all the possibilities. This
9079 pass may not be suitable since those insns may not be independent.
9081 * comclr can compare two ints or an int and a register, nullify
9082 the following instruction and zero some other register. This
9083 is more difficult to use as it's harder to find an insn which
9084 will generate a comclr than finding something like an unconditional
9085 branch. (conditional moves & long branches create comclr insns).
9087 * Most arithmetic operations can conditionally skip the next
9088 instruction. They can be viewed as "perform this operation
9089 and conditionally jump to this nearby location" (where nearby
9090 is an insn away). These are difficult to use due to the
9091 branch length restrictions. */
9093 static void
9094 pa_combine_instructions (void)
9096 rtx_insn *anchor;
9098 /* This can get expensive since the basic algorithm is on the
9099 order of O(n^2) (or worse). Only do it for -O2 or higher
9100 levels of optimization. */
9101 if (optimize < 2)
9102 return;
9104 /* Walk down the list of insns looking for "anchor" insns which
9105 may be combined with "floating" insns. As the name implies,
9106 "anchor" instructions don't move, while "floating" insns may
9107 move around. */
9108 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9109 rtx_insn *new_rtx = make_insn_raw (par);
9111 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9113 enum attr_pa_combine_type anchor_attr;
9114 enum attr_pa_combine_type floater_attr;
9116 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9117 Also ignore any special USE insns. */
9118 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9119 || GET_CODE (PATTERN (anchor)) == USE
9120 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9121 continue;
9123 anchor_attr = get_attr_pa_combine_type (anchor);
9124 /* See if anchor is an insn suitable for combination. */
9125 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9126 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9127 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9128 && ! forward_branch_p (anchor)))
9130 rtx_insn *floater;
9132 for (floater = PREV_INSN (anchor);
9133 floater;
9134 floater = PREV_INSN (floater))
9136 if (NOTE_P (floater)
9137 || (NONJUMP_INSN_P (floater)
9138 && (GET_CODE (PATTERN (floater)) == USE
9139 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9140 continue;
9142 /* Anything except a regular INSN will stop our search. */
9143 if (! NONJUMP_INSN_P (floater))
9145 floater = NULL;
9146 break;
9149 /* See if FLOATER is suitable for combination with the
9150 anchor. */
9151 floater_attr = get_attr_pa_combine_type (floater);
9152 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9153 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9154 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9155 && floater_attr == PA_COMBINE_TYPE_FMPY))
9157 /* If ANCHOR and FLOATER can be combined, then we're
9158 done with this pass. */
9159 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9160 SET_DEST (PATTERN (floater)),
9161 XEXP (SET_SRC (PATTERN (floater)), 0),
9162 XEXP (SET_SRC (PATTERN (floater)), 1)))
9163 break;
9166 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9167 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9169 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9171 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9172 SET_DEST (PATTERN (floater)),
9173 XEXP (SET_SRC (PATTERN (floater)), 0),
9174 XEXP (SET_SRC (PATTERN (floater)), 1)))
9175 break;
9177 else
9179 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9180 SET_DEST (PATTERN (floater)),
9181 SET_SRC (PATTERN (floater)),
9182 SET_SRC (PATTERN (floater))))
9183 break;
9188 /* If we didn't find anything on the backwards scan try forwards. */
9189 if (!floater
9190 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9191 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9193 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9195 if (NOTE_P (floater)
9196 || (NONJUMP_INSN_P (floater)
9197 && (GET_CODE (PATTERN (floater)) == USE
9198 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9200 continue;
9202 /* Anything except a regular INSN will stop our search. */
9203 if (! NONJUMP_INSN_P (floater))
9205 floater = NULL;
9206 break;
9209 /* See if FLOATER is suitable for combination with the
9210 anchor. */
9211 floater_attr = get_attr_pa_combine_type (floater);
9212 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9213 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9214 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9215 && floater_attr == PA_COMBINE_TYPE_FMPY))
9217 /* If ANCHOR and FLOATER can be combined, then we're
9218 done with this pass. */
9219 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9220 SET_DEST (PATTERN (floater)),
9221 XEXP (SET_SRC (PATTERN (floater)),
9223 XEXP (SET_SRC (PATTERN (floater)),
9224 1)))
9225 break;
9230 /* FLOATER will be nonzero if we found a suitable floating
9231 insn for combination with ANCHOR. */
9232 if (floater
9233 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9234 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9236 /* Emit the new instruction and delete the old anchor. */
9237 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9238 copy_rtx (PATTERN (floater)));
9239 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9240 emit_insn_before (temp, anchor);
9242 SET_INSN_DELETED (anchor);
9244 /* Emit a special USE insn for FLOATER, then delete
9245 the floating insn. */
9246 temp = copy_rtx (PATTERN (floater));
9247 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9248 delete_insn (floater);
9250 continue;
9252 else if (floater
9253 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9255 /* Emit the new jump instruction and delete the old anchor. */
9256 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9257 copy_rtx (PATTERN (floater)));
9258 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9259 temp = emit_jump_insn_before (temp, anchor);
9261 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9262 SET_INSN_DELETED (anchor);
9264 /* Emit a special USE insn for FLOATER, then delete
9265 the floating insn. */
9266 temp = copy_rtx (PATTERN (floater));
9267 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9268 delete_insn (floater);
9269 continue;
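/* The scratch insn used by this pass wraps the anchor and floater
   patterns in a two-element PARALLEL so that recog can test the
   combined form.  A sketch of that shape, using the standard RTL
   constructors (the helper name is hypothetical):  */

static rtx
sketch_combined_pattern (rtx anchor_pat, rtx floater_pat)
{
  /* PARALLEL [anchor; floater] -- the same shape pa_can_combine_p
     fills in before calling recog_memoized.  */
  return gen_rtx_PARALLEL (VOIDmode,
			   gen_rtvec (2, anchor_pat, floater_pat));
}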
9275 static int
9276 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9277 int reversed, rtx dest,
9278 rtx src1, rtx src2)
9280 int insn_code_number;
9281 rtx_insn *start, *end;
9283 /* Create a PARALLEL with the patterns of ANCHOR and
9284 FLOATER, try to recognize it, then test constraints
9285 for the resulting pattern.
9287 If the pattern doesn't match or the constraints
9288 aren't met, keep searching for a suitable floater
9289 insn. */
9290 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9291 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9292 INSN_CODE (new_rtx) = -1;
9293 insn_code_number = recog_memoized (new_rtx);
9294 basic_block bb = BLOCK_FOR_INSN (anchor);
9295 if (insn_code_number < 0
9296 || (extract_insn (new_rtx),
9297 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9298 return 0;
9300 if (reversed)
9302 start = anchor;
9303 end = floater;
9305 else
9307 start = floater;
9308 end = anchor;
9311 /* There are up to three operands to consider: one
9312 output and two inputs.
9314 The output must not be used between FLOATER & ANCHOR
9315 exclusive. The inputs must not be set between
9316 FLOATER and ANCHOR exclusive. */
9318 if (reg_used_between_p (dest, start, end))
9319 return 0;
9321 if (reg_set_between_p (src1, start, end))
9322 return 0;
9324 if (reg_set_between_p (src2, start, end))
9325 return 0;
9327 /* If we get here, then everything is good. */
9328 return 1;
9331 /* Return nonzero if references for INSN are delayed.
9333 Millicode insns are actually function calls with some special
9334 constraints on arguments and register usage.
9336 Millicode calls always expect their arguments in the integer argument
9337 registers, and always return their result in %r29 (ret1). They
9338 are expected to clobber their arguments, %r1, %r29, and the return
9339 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9341 This function tells reorg that the references to arguments of
9342 millicode calls do not appear to happen until after the millicode call.
9343 This allows reorg to put insns which set the argument registers into the
9344 delay slot of the millicode call -- thus they act more like traditional
9345 CALL_INSNs.
9347 Note we cannot consider side effects of the insn to be delayed because
9348 the branch and link insn will clobber the return pointer. If we happened
9349 to use the return pointer in the delay slot of the call, then we lose.
9351 get_attr_type will try to recognize the given insn, so make sure to
9352 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9353 in particular. */
9355 pa_insn_refs_are_delayed (rtx_insn *insn)
9357 return ((NONJUMP_INSN_P (insn)
9358 && GET_CODE (PATTERN (insn)) != SEQUENCE
9359 && GET_CODE (PATTERN (insn)) != USE
9360 && GET_CODE (PATTERN (insn)) != CLOBBER
9361 && get_attr_type (insn) == TYPE_MILLI));
9364 /* Promote the return value, but not the arguments. */
9366 static machine_mode
9367 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9368 machine_mode mode,
9369 int *punsignedp ATTRIBUTE_UNUSED,
9370 const_tree fntype ATTRIBUTE_UNUSED,
9371 int for_return)
9373 if (for_return == 0)
9374 return mode;
9375 return promote_mode (type, mode, punsignedp);
9378 /* On the HP-PA the value is found in register(s) 28(-29), unless
9379 the mode is SF or DF. Then the value is returned in fr4 (32).
9381 This must perform the same promotions as PROMOTE_MODE, else promoting
9382 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9384 Small structures must be returned in a PARALLEL on PA64 in order
9385 to match the HP Compiler ABI. */
9387 static rtx
9388 pa_function_value (const_tree valtype,
9389 const_tree func ATTRIBUTE_UNUSED,
9390 bool outgoing ATTRIBUTE_UNUSED)
9392 machine_mode valmode;
9394 if (AGGREGATE_TYPE_P (valtype)
9395 || TREE_CODE (valtype) == COMPLEX_TYPE
9396 || TREE_CODE (valtype) == VECTOR_TYPE)
9398 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9400 /* Handle aggregates that fit exactly in a word or double word. */
9401 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9402 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9404 if (TARGET_64BIT)
9406 /* Aggregates with a size less than or equal to 128 bits are
9407 returned in GR 28(-29). They are left justified. The pad
9408 bits are undefined. Larger aggregates are returned in
9409 memory. */
9410 rtx loc[2];
9411 int i, offset = 0;
9412 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9414 for (i = 0; i < ub; i++)
9416 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9417 gen_rtx_REG (DImode, 28 + i),
9418 GEN_INT (offset));
9419 offset += 8;
9422 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9424 else if (valsize > UNITS_PER_WORD)
9426 /* Aggregates 5 to 8 bytes in size are returned in general
9427 registers r28-r29 in the same manner as other non
9428 floating-point objects. The data is right-justified and
9429 zero-extended to 64 bits. This is opposite to the normal
9430 justification used on big endian targets and requires
9431 special treatment. */
9432 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9433 gen_rtx_REG (DImode, 28), const0_rtx);
9434 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9438 if ((INTEGRAL_TYPE_P (valtype)
9439 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9440 || POINTER_TYPE_P (valtype))
9441 valmode = word_mode;
9442 else
9443 valmode = TYPE_MODE (valtype);
9445 if (TREE_CODE (valtype) == REAL_TYPE
9446 && !AGGREGATE_TYPE_P (valtype)
9447 && TYPE_MODE (valtype) != TFmode
9448 && !TARGET_SOFT_FLOAT)
9449 return gen_rtx_REG (valmode, 32);
9451 return gen_rtx_REG (valmode, 28);
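/* For example, on PA64 a 12-byte aggregate is returned left justified
   in GR 28-29 as PARALLEL [(DImode r28) @ 0; (DImode r29) @ 8].  A
   sketch of the shape built above (hypothetical helper):  */

static rtx
sketch_pa64_small_aggregate_return (void)
{
  rtx lo = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, 28),
			      GEN_INT (0));
  rtx hi = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, 29),
			      GEN_INT (8));
  return gen_rtx_PARALLEL (BLKmode, gen_rtvec (2, lo, hi));
}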
9454 /* Implement the TARGET_LIBCALL_VALUE hook. */
9456 static rtx
9457 pa_libcall_value (machine_mode mode,
9458 const_rtx fun ATTRIBUTE_UNUSED)
9460 if (! TARGET_SOFT_FLOAT
9461 && (mode == SFmode || mode == DFmode))
9462 return gen_rtx_REG (mode, 32);
9463 else
9464 return gen_rtx_REG (mode, 28);
9467 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9469 static bool
9470 pa_function_value_regno_p (const unsigned int regno)
9472 if (regno == 28
9473 || (! TARGET_SOFT_FLOAT && regno == 32))
9474 return true;
9476 return false;
9479 /* Update the data in CUM to advance over an argument
9480 of mode MODE and data type TYPE.
9481 (TYPE is null for libcalls where that information may not be available.) */
9483 static void
9484 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9485 const_tree type, bool named ATTRIBUTE_UNUSED)
9487 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9488 int arg_size = pa_function_arg_size (mode, type);
9490 cum->nargs_prototype--;
9491 cum->words += (arg_size
9492 + ((cum->words & 01)
9493 && type != NULL_TREE
9494 && arg_size > 1));
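/* A worked instance of the update above, as a standalone C sketch
   (hypothetical helper name; counts are in words):

     static void
     advance_words_model (int *words, int arg_size, int have_type)
     {
       *words += arg_size + ((*words & 1) && have_type && arg_size > 1);
     }

   Starting at an odd slot (*words == 1), a two-word argument pays one
   word of alignment padding and leaves *words == 4, while a one-word
   argument simply leaves *words == 2.  */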
9497 /* Return the location of a parameter that is passed in a register or NULL
9498 if the parameter has any component that is passed in memory.
9500 This is new code and will be pushed into the net sources after
9501 further testing.
9503 ??? We might want to restructure this so that it looks more like other
9504 ports. */
9505 static rtx
9506 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9507 const_tree type, bool named ATTRIBUTE_UNUSED)
9509 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9510 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9511 int alignment = 0;
9512 int arg_size;
9513 int fpr_reg_base;
9514 int gpr_reg_base;
9515 rtx retval;
9517 if (mode == VOIDmode)
9518 return NULL_RTX;
9520 arg_size = pa_function_arg_size (mode, type);
9522 /* If this arg would be passed partially or totally on the stack, then
9523 this routine should return zero. pa_arg_partial_bytes will
9524 handle arguments which are split between regs and stack slots if
9525 the ABI mandates split arguments. */
9526 if (!TARGET_64BIT)
9528 /* The 32-bit ABI does not split arguments. */
9529 if (cum->words + arg_size > max_arg_words)
9530 return NULL_RTX;
9532 else
9534 if (arg_size > 1)
9535 alignment = cum->words & 1;
9536 if (cum->words + alignment >= max_arg_words)
9537 return NULL_RTX;
9540 /* The 32-bit and 64-bit ABIs are rather different,
9541 particularly in their handling of FP registers. We might
9542 be able to cleverly share code between them, but I'm not
9543 going to bother in the hope that splitting them up results
9544 in code that is more easily understood. */
9546 if (TARGET_64BIT)
9548 /* Advance the base registers to their current locations.
9550 Remember, gprs grow towards smaller register numbers while
9551 fprs grow towards higher register numbers. Also remember that
9552 although FP regs are 32-bit addressable, we pretend that
9553 the registers are 64 bits wide. */
9554 gpr_reg_base = 26 - cum->words;
9555 fpr_reg_base = 32 + cum->words;
9557 /* Arguments wider than one word and small aggregates need special
9558 treatment. */
9559 if (arg_size > 1
9560 || mode == BLKmode
9561 || (type && (AGGREGATE_TYPE_P (type)
9562 || TREE_CODE (type) == COMPLEX_TYPE
9563 || TREE_CODE (type) == VECTOR_TYPE)))
9565 /* Double-extended precision (80-bit), quad-precision (128-bit)
9566 and aggregates including complex numbers are aligned on
9567 128-bit boundaries. The first eight 64-bit argument slots
9568 are associated one-to-one, with general registers r26
9569 through r19, and also with floating-point registers fr4
9570 through fr11. Arguments larger than one word are always
9571 passed in general registers.
9573 Using a PARALLEL with a word mode register results in left
9574 justified data on a big-endian target. */
9576 rtx loc[8];
9577 int i, offset = 0, ub = arg_size;
9579 /* Align the base register. */
9580 gpr_reg_base -= alignment;
9582 ub = MIN (ub, max_arg_words - cum->words - alignment);
9583 for (i = 0; i < ub; i++)
9585 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9586 gen_rtx_REG (DImode, gpr_reg_base),
9587 GEN_INT (offset));
9588 gpr_reg_base -= 1;
9589 offset += 8;
9592 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9595 else
9597 /* If the argument is larger than a word, then we know precisely
9598 which registers we must use. */
9599 if (arg_size > 1)
9601 if (cum->words)
9603 gpr_reg_base = 23;
9604 fpr_reg_base = 38;
9606 else
9608 gpr_reg_base = 25;
9609 fpr_reg_base = 34;
9612 /* Structures 5 to 8 bytes in size are passed in the general
9613 registers in the same manner as other non-floating-point
9614 objects. The data is right-justified and zero-extended
9615 to 64 bits. This is opposite to the normal justification
9616 used on big endian targets and requires special treatment.
9617 We now define BLOCK_REG_PADDING to pad these objects.
9618 Aggregates, complex and vector types are passed in the same
9619 manner as structures. */
9620 if (mode == BLKmode
9621 || (type && (AGGREGATE_TYPE_P (type)
9622 || TREE_CODE (type) == COMPLEX_TYPE
9623 || TREE_CODE (type) == VECTOR_TYPE)))
9625 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9626 gen_rtx_REG (DImode, gpr_reg_base),
9627 const0_rtx);
9628 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9631 else
9633 /* We have a single word (32 bits). A simple computation
9634 will get us the register #s we need. */
9635 gpr_reg_base = 26 - cum->words;
9636 fpr_reg_base = 32 + 2 * cum->words;
9640 /* Determine if the argument needs to be passed in both general and
9641 floating point registers. */
9642 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9643 /* If we are doing soft-float with portable runtime, then there
9644 is no need to worry about FP regs. */
9645 && !TARGET_SOFT_FLOAT
9646 /* The parameter must be some kind of scalar float, else we just
9647 pass it in integer registers. */
9648 && GET_MODE_CLASS (mode) == MODE_FLOAT
9649 /* The target function must not have a prototype. */
9650 && cum->nargs_prototype <= 0
9651 /* libcalls do not need to pass items in both FP and general
9652 registers. */
9653 && type != NULL_TREE
9654 /* All this hair applies to "outgoing" args only. This includes
9655 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9656 && !cum->incoming)
9657 /* Also pass outgoing floating arguments in both registers in indirect
9658 calls with the 32-bit ABI and the HP assembler since there is no
9659 way to specify argument locations in static functions. */
9660 || (!TARGET_64BIT
9661 && !TARGET_GAS
9662 && !cum->incoming
9663 && cum->indirect
9664 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9666 retval
9667 = gen_rtx_PARALLEL
9668 (mode,
9669 gen_rtvec (2,
9670 gen_rtx_EXPR_LIST (VOIDmode,
9671 gen_rtx_REG (mode, fpr_reg_base),
9672 const0_rtx),
9673 gen_rtx_EXPR_LIST (VOIDmode,
9674 gen_rtx_REG (mode, gpr_reg_base),
9675 const0_rtx)));
9677 else
9679 /* See if we should pass this parameter in a general register. */
9680 if (TARGET_SOFT_FLOAT
9681 /* Indirect calls in the normal 32-bit ABI require all arguments
9682 to be passed in general registers. */
9683 || (!TARGET_PORTABLE_RUNTIME
9684 && !TARGET_64BIT
9685 && !TARGET_ELF32
9686 && cum->indirect)
9687 /* If the parameter is not a scalar floating-point parameter,
9688 then it belongs in GPRs. */
9689 || GET_MODE_CLASS (mode) != MODE_FLOAT
9690 /* Structure with single SFmode field belongs in GPR. */
9691 || (type && AGGREGATE_TYPE_P (type)))
9692 retval = gen_rtx_REG (mode, gpr_reg_base);
9693 else
9694 retval = gen_rtx_REG (mode, fpr_reg_base);
9696 return retval;
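/* Two concrete readings of the code above (hard register numbers as
   used throughout this file):

   - 64-bit, cum->words == 2, DFmode argument: arg_size == 1, so
     gpr_reg_base == 24 and fpr_reg_base == 34, and an unprototyped
     callee receives a two-element PARALLEL naming both registers;

   - 32-bit, cum->words == 0, DFmode argument: arg_size == 2, so the
     argument takes the first register pair, with gpr_reg_base == 25
     and fpr_reg_base == 34.  */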
9699 /* Arguments larger than one word are double word aligned. */
9701 static unsigned int
9702 pa_function_arg_boundary (machine_mode mode, const_tree type)
9704 bool singleword = (type
9705 ? (integer_zerop (TYPE_SIZE (type))
9706 || !TREE_CONSTANT (TYPE_SIZE (type))
9707 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9708 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9710 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
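/* e.g. on the 32-bit port an int stays at PARM_BOUNDARY (one word)
   while a double is aligned to MAX_PARM_BOUNDARY (a double word);
   zero-sized and variable-sized types count as single-word since
   they are passed by reference.  */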
9713 /* If this arg would be passed totally in registers or totally on the stack,
9714 then this routine should return zero. */
9716 static int
9717 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9718 tree type, bool named ATTRIBUTE_UNUSED)
9720 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9721 unsigned int max_arg_words = 8;
9722 unsigned int offset = 0;
9724 if (!TARGET_64BIT)
9725 return 0;
9727 if (pa_function_arg_size (mode, type) > 1 && (cum->words & 1))
9728 offset = 1;
9730 if (cum->words + offset + pa_function_arg_size (mode, type) <= max_arg_words)
9731 /* Arg fits fully into registers. */
9732 return 0;
9733 else if (cum->words + offset >= max_arg_words)
9734 /* Arg fully on the stack. */
9735 return 0;
9736 else
9737 /* Arg is split. */
9738 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
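/* A standalone C model of the three-way split above (hypothetical
   helper; words and size are in words, offset is the alignment pad
   computed above, and the result is in bytes as the hook returns):

     static int
     partial_bytes_model (unsigned words, unsigned size, unsigned offset,
                          unsigned word_bytes)
     {
       const unsigned max_words = 8;
       if (words + offset + size <= max_words)
         return 0;
       if (words + offset >= max_words)
         return 0;
       return (max_words - words - offset) * word_bytes;
     }

   e.g. with six argument words consumed, an aligned four-word argument
   puts two words (16 bytes) in registers and the rest on the stack.  */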
9742 /* A get_unnamed_section callback for switching to the text section.
9744 This function is only used with SOM. Because we don't support
9745 named subspaces, we can only create a new subspace or switch back
9746 to the default text subspace. */
9748 static void
9749 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9751 gcc_assert (TARGET_SOM);
9752 if (TARGET_GAS)
9754 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9756 /* We only want to emit a .nsubspa directive once at the
9757 start of the function. */
9758 cfun->machine->in_nsubspa = 1;
9760 /* Create a new subspace for the text. This provides
9761 better stub placement and one-only functions. */
9762 if (cfun->decl
9763 && DECL_ONE_ONLY (cfun->decl)
9764 && !DECL_WEAK (cfun->decl))
9766 output_section_asm_op ("\t.SPACE $TEXT$\n"
9767 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9768 "ACCESS=44,SORT=24,COMDAT");
9769 return;
9772 else
9774 /* Either there isn't a current function, or the body of the
9775 current function has been completed. So, we are changing to the
9776 text section to output debugging information. Thus, we
9777 need to forget that we are in the text section so that
9778 varasm.c will call us when text_section is selected again. */
9779 gcc_assert (!cfun || !cfun->machine
9780 || cfun->machine->in_nsubspa == 2);
9781 in_section = NULL;
9783 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9784 return;
9786 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9789 /* A get_unnamed_section callback for switching to comdat data
9790 sections. This function is only used with SOM. */
9792 static void
9793 som_output_comdat_data_section_asm_op (const void *data)
9795 in_section = NULL;
9796 output_section_asm_op (data);
9799 /* Implement TARGET_ASM_INIT_SECTIONS. */
9801 static void
9802 pa_som_asm_init_sections (void)
9804 text_section
9805 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9807 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9808 is not being generated. */
9809 som_readonly_data_section
9810 = get_unnamed_section (0, output_section_asm_op,
9811 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9813 /* When secondary definitions are not supported, SOM makes readonly
9814 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9815 the comdat flag. */
9816 som_one_only_readonly_data_section
9817 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9818 "\t.SPACE $TEXT$\n"
9819 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9820 "ACCESS=0x2c,SORT=16,COMDAT");
9823 /* When secondary definitions are not supported, SOM makes data one-only
9824 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9825 som_one_only_data_section
9826 = get_unnamed_section (SECTION_WRITE,
9827 som_output_comdat_data_section_asm_op,
9828 "\t.SPACE $PRIVATE$\n"
9829 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9830 "ACCESS=31,SORT=24,COMDAT");
9832 if (flag_tm)
9833 som_tm_clone_table_section
9834 = get_unnamed_section (0, output_section_asm_op,
9835 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9837 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9838 which reference data within the $TEXT$ space (for example constant
9839 strings in the $LIT$ subspace).
9841 The assemblers (GAS and HP as) both have problems with handling
9842 the difference of two symbols, which is the other correct way to
9843 reference constant data during PIC code generation.
9845 So, there's no way to reference constant data which is in the
9846 $TEXT$ space during PIC generation. Instead, place all constant
9847 data into the $PRIVATE$ subspace (this reduces sharing, but it
9848 works correctly). */
9849 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9851 /* We must not have a reference to an external symbol defined in a
9852 shared library in a readonly section, else the SOM linker will
9853 complain.
9855 So, we force exception information into the data section. */
9856 exception_section = data_section;
9859 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9861 static section *
9862 pa_som_tm_clone_table_section (void)
9864 return som_tm_clone_table_section;
9867 /* On hpux10, the linker will give an error if we have a reference
9868 in the read-only data section to a symbol defined in a shared
9869 library. Therefore, expressions that might require a reloc
9870 cannot be placed in the read-only data section. */
9872 static section *
9873 pa_select_section (tree exp, int reloc,
9874 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9876 if (TREE_CODE (exp) == VAR_DECL
9877 && TREE_READONLY (exp)
9878 && !TREE_THIS_VOLATILE (exp)
9879 && DECL_INITIAL (exp)
9880 && (DECL_INITIAL (exp) == error_mark_node
9881 || TREE_CONSTANT (DECL_INITIAL (exp)))
9882 && !reloc)
9884 if (TARGET_SOM
9885 && DECL_ONE_ONLY (exp)
9886 && !DECL_WEAK (exp))
9887 return som_one_only_readonly_data_section;
9888 else
9889 return readonly_data_section;
9891 else if (CONSTANT_CLASS_P (exp) && !reloc)
9892 return readonly_data_section;
9893 else if (TARGET_SOM
9894 && TREE_CODE (exp) == VAR_DECL
9895 && DECL_ONE_ONLY (exp)
9896 && !DECL_WEAK (exp))
9897 return som_one_only_data_section;
9898 else
9899 return data_section;
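/* In summary: read-only, non-volatile, initialized variables without
   relocations go to the read-only data section (or its SOM one-only
   variant when DECL_ONE_ONLY and not weak); reloc-free constants
   likewise go read-only; remaining SOM one-only variables get the
   one-only data section; and everything else lands in data_section.  */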
9902 /* Implement pa_reloc_rw_mask. */
9904 static int
9905 pa_reloc_rw_mask (void)
9907 /* We force (const (plus (symbol) (const_int))) to memory when the
9908 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9909 handle this construct in read-only memory and we want to avoid
9910 this for ELF. So, we always force an RTX needing relocation to
9911 the data section. */
9912 return 3;
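/* The mask above is tested in varasm.c against the reloc kind computed
   for an initializer (as of this writing, bit 0 for relocations to
   local symbols and bit 1 for relocations to global symbols), so
   returning 3 sends any constant needing either kind of relocation to
   a writable section.  */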
9915 static void
9916 pa_globalize_label (FILE *stream, const char *name)
9918 /* We only handle DATA objects here; functions are globalized in
9919 ASM_DECLARE_FUNCTION_NAME. */
9920 if (! FUNCTION_NAME_P (name))
9922 fputs ("\t.EXPORT ", stream);
9923 assemble_name (stream, name);
9924 fputs (",DATA\n", stream);
9928 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9930 static rtx
9931 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9932 int incoming ATTRIBUTE_UNUSED)
9934 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9937 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9939 bool
9940 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9942 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9943 PA64 ABI says that objects larger than 128 bits are returned in memory.
9944 Note, int_size_in_bytes can return -1 if the size of the object is
9945 variable or larger than the maximum value that can be expressed as
9946 a HOST_WIDE_INT. It can also return zero for an empty type. The
9947 simplest way to handle variable and empty types is to pass them in
9948 memory. This avoids problems in defining the boundaries of argument
9949 slots, allocating registers, etc. */
9950 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9951 || int_size_in_bytes (type) <= 0);
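/* Concrete readings of the test above: an 8-byte struct is returned in
   registers on both sub-targets; a 12-byte struct goes to memory on
   the 32-bit SOM port but stays in registers on PA64; variable-size
   (-1) and empty (0) types always go to memory.  */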
9954 /* Structure to hold declaration and name of external symbols that are
9955 emitted by GCC. We generate a vector of these symbols and output them
9956 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9957 This avoids putting out names that are never really used. */
9959 typedef struct GTY(()) extern_symbol
9961 tree decl;
9962 const char *name;
9963 } extern_symbol;
9965 /* Define gc'd vector type for extern_symbol. */
9967 /* Vector of extern_symbol entries. */
9968 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9970 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9971 /* Mark DECL (name NAME) as an external reference (assembler output
9972 file FILE). This saves the names to output at the end of the file
9973 if actually referenced. */
9975 void
9976 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9978 gcc_assert (file == asm_out_file);
9979 extern_symbol p = {decl, name};
9980 vec_safe_push (extern_symbols, p);
9982 #endif
9984 /* Output text required at the end of an assembler file.
9985 This includes deferred plabels and .import directives for
9986 all external symbols that were actually referenced. */
9988 static void
9989 pa_file_end (void)
9991 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9992 unsigned int i;
9993 extern_symbol *p;
9995 if (!NO_DEFERRED_PROFILE_COUNTERS)
9996 output_deferred_profile_counters ();
9997 #endif
9999 output_deferred_plabels ();
10001 #ifdef ASM_OUTPUT_EXTERNAL_REAL
10002 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
10004 tree decl = p->decl;
10006 if (!TREE_ASM_WRITTEN (decl)
10007 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
10008 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
10011 vec_free (extern_symbols);
10012 #endif
10014 if (NEED_INDICATE_EXEC_STACK)
10015 file_end_indicate_exec_stack ();
10018 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10020 static bool
10021 pa_can_change_mode_class (machine_mode from, machine_mode to,
10022 reg_class_t rclass)
10024 if (from == to)
10025 return true;
10027 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
10028 return true;
10030 /* Reject changes to/from modes with zero size. */
10031 if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
10032 return false;
10034 /* Reject changes to/from complex and vector modes. */
10035 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
10036 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
10037 return false;
10039 /* There is no way to load QImode or HImode values directly from memory
10040 to a FP register. SImode loads to the FP registers are not zero
10041 extended. On the 64-bit target, this conflicts with the definition
10042 of LOAD_EXTEND_OP. Thus, we can't allow changing between modes with
10043 different sizes in the floating-point registers. */
10044 if (MAYBE_FP_REG_CLASS_P (rclass))
10045 return false;
10047 /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
10048 in specific sets of registers. Thus, we cannot allow changing
10049 to a larger mode when it's larger than a word. */
10050 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10051 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10052 return false;
10054 return true;
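/* e.g. on the 32-bit port a DImode-to-SImode change in a general
   register survives the last test (the new mode is no wider than a
   word), SImode-to-DImode is rejected there, and any size-changing
   mode change in a class that may contain FP registers is rejected
   outright.  */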
10057 /* Implement TARGET_MODES_TIEABLE_P.
10059 We should return FALSE for QImode and HImode because these modes
10060 are not ok in the floating-point registers. However, this prevents
10061 tying these modes to SImode and DImode in the general registers.
10062 So, this isn't a good idea. We rely on TARGET_HARD_REGNO_MODE_OK and
10063 TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
10064 in the floating-point registers. */
10066 static bool
10067 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10069 /* Don't tie modes in different classes. */
10070 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10071 return false;
10073 return true;
10077 /* Length in units of the trampoline instruction code. */
10079 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10082 /* Output assembler code for a block containing the constant parts
10083 of a trampoline, leaving space for the variable parts.
10085 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10086 and then branches to the specified routine.
10088 This code template is copied from the text segment to a stack
10089 location, patched by pa_trampoline_init to contain valid values,
10090 and then entered as a subroutine.
10092 It is best to keep this as small as possible to avoid having to
10093 flush multiple lines in the cache. */
10095 static void
10096 pa_asm_trampoline_template (FILE *f)
10098 if (!TARGET_64BIT)
10100 fputs ("\tldw 36(%r22),%r21\n", f);
10101 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10102 if (ASSEMBLER_DIALECT == 0)
10103 fputs ("\tdepi 0,31,2,%r21\n", f);
10104 else
10105 fputs ("\tdepwi 0,31,2,%r21\n", f);
10106 fputs ("\tldw 4(%r21),%r19\n", f);
10107 fputs ("\tldw 0(%r21),%r21\n", f);
10108 if (TARGET_PA_20)
10110 fputs ("\tbve (%r21)\n", f);
10111 fputs ("\tldw 40(%r22),%r29\n", f);
10112 fputs ("\t.word 0\n", f);
10113 fputs ("\t.word 0\n", f);
10115 else
10117 fputs ("\tldsid (%r21),%r1\n", f);
10118 fputs ("\tmtsp %r1,%sr0\n", f);
10119 fputs ("\tbe 0(%sr0,%r21)\n", f);
10120 fputs ("\tldw 40(%r22),%r29\n", f);
10122 fputs ("\t.word 0\n", f);
10123 fputs ("\t.word 0\n", f);
10124 fputs ("\t.word 0\n", f);
10125 fputs ("\t.word 0\n", f);
10127 else
10129 fputs ("\t.dword 0\n", f);
10130 fputs ("\t.dword 0\n", f);
10131 fputs ("\t.dword 0\n", f);
10132 fputs ("\t.dword 0\n", f);
10133 fputs ("\tmfia %r31\n", f);
10134 fputs ("\tldd 24(%r31),%r1\n", f);
10135 fputs ("\tldd 24(%r1),%r27\n", f);
10136 fputs ("\tldd 16(%r1),%r1\n", f);
10137 fputs ("\tbve (%r1)\n", f);
10138 fputs ("\tldd 32(%r31),%r31\n", f);
10139 fputs ("\t.dword 0 ; fptr\n", f);
10140 fputs ("\t.dword 0 ; static link\n", f);
10144 /* Emit RTL insns to initialize the variable parts of a trampoline.
10145 FNADDR is an RTX for the address of the function's pure code.
10146 CXT is an RTX for the static chain value for the function.
10148 Move the function address to the trampoline template at offset 36.
10149 Move the static chain value to the trampoline template at offset 40.
10150 Move the trampoline address to the trampoline template at offset 44.
10151 Move r19 to the trampoline template at offset 48. The latter two
10152 words create a plabel for the indirect call to the trampoline.
10154 A similar sequence is used for the 64-bit port but the plabel is
10155 at the beginning of the trampoline.
10157 Finally, the cache entries for the trampoline code are flushed.
10158 This is necessary to ensure that the trampoline instruction sequence
10159 is written to memory prior to any attempts at prefetching the code
10160 sequence. */
10162 static void
10163 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10165 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10166 rtx start_addr = gen_reg_rtx (Pmode);
10167 rtx end_addr = gen_reg_rtx (Pmode);
10168 rtx line_length = gen_reg_rtx (Pmode);
10169 rtx r_tramp, tmp;
10171 emit_block_move (m_tramp, assemble_trampoline_template (),
10172 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10173 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10175 if (!TARGET_64BIT)
10177 tmp = adjust_address (m_tramp, Pmode, 36);
10178 emit_move_insn (tmp, fnaddr);
10179 tmp = adjust_address (m_tramp, Pmode, 40);
10180 emit_move_insn (tmp, chain_value);
10182 /* Create a fat pointer for the trampoline. */
10183 tmp = adjust_address (m_tramp, Pmode, 44);
10184 emit_move_insn (tmp, r_tramp);
10185 tmp = adjust_address (m_tramp, Pmode, 48);
10186 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10188 /* fdc and fic only use registers for the address to flush,
10189 they do not accept integer displacements. We align the
10190 start and end addresses to the beginning of their respective
10191 cache lines to minimize the number of lines flushed. */
10192 emit_insn (gen_andsi3 (start_addr, r_tramp,
10193 GEN_INT (-MIN_CACHELINE_SIZE)));
10194 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10195 TRAMPOLINE_CODE_SIZE-1));
10196 emit_insn (gen_andsi3 (end_addr, tmp,
10197 GEN_INT (-MIN_CACHELINE_SIZE)));
10198 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10199 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10200 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10201 gen_reg_rtx (Pmode),
10202 gen_reg_rtx (Pmode)));
10204 else
10206 tmp = adjust_address (m_tramp, Pmode, 56);
10207 emit_move_insn (tmp, fnaddr);
10208 tmp = adjust_address (m_tramp, Pmode, 64);
10209 emit_move_insn (tmp, chain_value);
10211 /* Create a fat pointer for the trampoline. */
10212 tmp = adjust_address (m_tramp, Pmode, 16);
10213 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10214 r_tramp, 32)));
10215 tmp = adjust_address (m_tramp, Pmode, 24);
10216 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10218 /* fdc and fic only use registers for the address to flush,
10219 they do not accept integer displacements. We align the
10220 start and end addresses to the beginning of their respective
10221 cache lines to minimize the number of lines flushed. */
10222 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10223 emit_insn (gen_anddi3 (start_addr, tmp,
10224 GEN_INT (-MIN_CACHELINE_SIZE)));
10225 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10226 TRAMPOLINE_CODE_SIZE - 1));
10227 emit_insn (gen_anddi3 (end_addr, tmp,
10228 GEN_INT (-MIN_CACHELINE_SIZE)));
10229 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10230 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10231 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10232 gen_reg_rtx (Pmode),
10233 gen_reg_rtx (Pmode)));
10236 #ifdef HAVE_ENABLE_EXECUTE_STACK
10237 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10238 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10239 #endif
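/* The flush-range computation used in both branches above, as a
   standalone C model (hypothetical helper; tramp is the address of the
   first code byte, which the 64-bit branch obtains by skipping the
   32-byte plabel area, and line is the power-of-two line size that
   MIN_CACHELINE_SIZE provides):

     static void
     flush_range_model (unsigned long tramp, unsigned long code_size,
                        unsigned long line, unsigned long *start,
                        unsigned long *end)
     {
       *start = tramp & ~(line - 1);
       *end = (tramp + code_size - 1) & ~(line - 1);
     }

   e.g. tramp == 0x1010, code_size == 40 and line == 32 give
   start == 0x1000 and end == 0x1020, the two lines covering bytes
   0x1010 through 0x1037.  */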
10242 /* Perform any machine-specific adjustment in the address of the trampoline.
10243 ADDR contains the address that was passed to pa_trampoline_init.
10244 Adjust the trampoline address to point to the plabel at offset 44
10245 (the extra 2 added below marks the address as a plabel). */
10246 static rtx
10247 pa_trampoline_adjust_address (rtx addr)
10249 if (!TARGET_64BIT)
10250 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10251 return addr;
10254 static rtx
10255 pa_delegitimize_address (rtx orig_x)
10257 rtx x = delegitimize_mem_from_attrs (orig_x);
10259 if (GET_CODE (x) == LO_SUM
10260 && GET_CODE (XEXP (x, 1)) == UNSPEC
10261 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10262 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10263 return x;
10266 static rtx
10267 pa_internal_arg_pointer (void)
10269 /* The argument pointer and the hard frame pointer are the same in
10270 the 32-bit runtime, so we don't need a copy. */
10271 if (TARGET_64BIT)
10272 return copy_to_reg (virtual_incoming_args_rtx);
10273 else
10274 return virtual_incoming_args_rtx;
10277 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10278 Frame pointer elimination is automatically handled. */
10280 static bool
10281 pa_can_eliminate (const int from, const int to)
10283 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10284 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10285 return false;
10287 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10288 ? ! frame_pointer_needed
10289 : true);
10292 /* Define the offset between two registers, FROM to be eliminated and its
10293 replacement TO, at the start of a routine. */
10294 HOST_WIDE_INT
10295 pa_initial_elimination_offset (int from, int to)
10297 HOST_WIDE_INT offset;
10299 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10300 && to == STACK_POINTER_REGNUM)
10301 offset = -pa_compute_frame_size (get_frame_size (), 0);
10302 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10303 offset = 0;
10304 else
10305 gcc_unreachable ();
10307 return offset;
10310 static void
10311 pa_conditional_register_usage (void)
10313 int i;
10315 if (!TARGET_64BIT && !TARGET_PA_11)
10317 for (i = 56; i <= FP_REG_LAST; i++)
10318 fixed_regs[i] = call_used_regs[i] = 1;
10319 for (i = 33; i < 56; i += 2)
10320 fixed_regs[i] = call_used_regs[i] = 1;
10322 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10324 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10325 fixed_regs[i] = call_used_regs[i] = 1;
10327 if (flag_pic)
10328 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10331 /* Target hook for c_mode_for_suffix. */
10333 static machine_mode
10334 pa_c_mode_for_suffix (char suffix)
10336 if (HPUX_LONG_DOUBLE_LIBRARY)
10338 if (suffix == 'q')
10339 return TFmode;
10342 return VOIDmode;
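/* With this hook in place, a constant written with the 'q' suffix
   (e.g. 1.0q) gets TFmode, the 128-bit long-double format backed by
   the HP-UX long-double library; on sub-targets without that library
   the hook returns VOIDmode and the suffix is not accepted.  */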
10345 /* Target hook for function_section. */
10347 static section *
10348 pa_function_section (tree decl, enum node_frequency freq,
10349 bool startup, bool exit)
10351 /* Put functions in text section if target doesn't have named sections. */
10352 if (!targetm_common.have_named_sections)
10353 return text_section;
10355 /* Force nested functions into the same section as the containing
10356 function. */
10357 if (decl
10358 && DECL_SECTION_NAME (decl) == NULL
10359 && DECL_CONTEXT (decl) != NULL_TREE
10360 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10361 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10362 return function_section (DECL_CONTEXT (decl));
10364 /* Otherwise, use the default function section. */
10365 return default_function_section (decl, freq, startup, exit);
10368 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10370 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10371 that need more than three instructions to load prior to reload. This
10372 limit is somewhat arbitrary. It takes three instructions to load a
10373 CONST_INT from memory but two are memory accesses. It may be better
10374 to increase the allowed range for CONST_INTS. We may also be able
10375 to handle CONST_DOUBLES. */
10377 static bool
10378 pa_legitimate_constant_p (machine_mode mode, rtx x)
10380 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10381 return false;
10383 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10384 return false;
10386 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10387 legitimate constants. The other variants can't be handled by
10388 the move patterns after reload starts. */
10389 if (tls_referenced_p (x))
10390 return false;
10392 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10393 return false;
10395 if (TARGET_64BIT
10396 && HOST_BITS_PER_WIDE_INT > 32
10397 && GET_CODE (x) == CONST_INT
10398 && !reload_in_progress
10399 && !reload_completed
10400 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10401 && !pa_cint_ok_for_move (UINTVAL (x)))
10402 return false;
10404 if (function_label_operand (x, mode))
10405 return false;
10407 return true;
10410 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10412 static unsigned int
10413 pa_section_type_flags (tree decl, const char *name, int reloc)
10415 unsigned int flags;
10417 flags = default_section_type_flags (decl, name, reloc);
10419 /* Function labels are placed in the constant pool. This can
10420 cause a section conflict if decls are put in ".data.rel.ro"
10421 or ".data.rel.ro.local" using the __attribute__ construct. */
10422 if (strcmp (name, ".data.rel.ro") == 0
10423 || strcmp (name, ".data.rel.ro.local") == 0)
10424 flags |= SECTION_WRITE | SECTION_RELRO;
10426 return flags;
10429 /* pa_legitimate_address_p recognizes an RTL expression that is a
10430 valid memory address for an instruction. The MODE argument is the
10431 machine mode for the MEM expression that wants to use this address.
10433 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10434 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10435 available with floating point loads and stores, and integer loads.
10436 We get better code by allowing indexed addresses in the initial
10437 RTL generation.
10439 The acceptance of indexed addresses as legitimate implies that we
10440 must provide patterns for doing indexed integer stores, or the move
10441 expanders must force the address of an indexed store to a register.
10442 We have adopted the latter approach.
10444 Another function of pa_legitimate_address_p is to ensure that
10445 the base register is a valid pointer for indexed instructions.
10446 On targets that have non-equivalent space registers, we have to
10447 know at the time of assembler output which register in a REG+REG
10448 pair is the base register. The REG_POINTER flag is sometimes lost
10449 in reload and the following passes, so it can't be relied on during
10450 code generation. Thus, we either have to canonicalize the order
10451 of the registers in REG+REG indexed addresses, or treat REG+REG
10452 addresses separately and provide patterns for both permutations.
10454 The latter approach requires several hundred additional lines of
10455 code in pa.md. The downside to canonicalizing is that a PLUS
10456 in the wrong order can't combine to form a scaled indexed
10457 memory operand. As we won't need to canonicalize the operands if
10458 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10460 We initially break out scaled indexed addresses in canonical order
10461 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10462 scaled indexed addresses during RTL generation. However, fold_rtx
10463 has its own opinion on how the operands of a PLUS should be ordered.
10464 If one of the operands is equivalent to a constant, it will make
10465 that operand the second operand. As the base register is likely to
10466 be equivalent to a SYMBOL_REF, we have made it the second operand.
10468 pa_legitimate_address_p accepts REG+REG as legitimate when the
10469 operands are in the order INDEX+BASE on targets with non-equivalent
10470 space registers, and in any order on targets with equivalent space
10471 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10473 We treat a SYMBOL_REF as legitimate if it is part of the current
10474 function's constant-pool, because such addresses can actually be
10475 output as REG+SMALLINT. */
10477 static bool
10478 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10480 if ((REG_P (x)
10481 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10482 : REG_OK_FOR_BASE_P (x)))
10483 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10484 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10485 && REG_P (XEXP (x, 0))
10486 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10487 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10488 return true;
10490 if (GET_CODE (x) == PLUS)
10492 rtx base, index;
10494 /* For REG+REG, the base register should be in XEXP (x, 1),
10495 so check it first. */
10496 if (REG_P (XEXP (x, 1))
10497 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10498 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10499 base = XEXP (x, 1), index = XEXP (x, 0);
10500 else if (REG_P (XEXP (x, 0))
10501 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10502 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10503 base = XEXP (x, 0), index = XEXP (x, 1);
10504 else
10505 return false;
10507 if (GET_CODE (index) == CONST_INT)
10509 if (INT_5_BITS (index))
10510 return true;
10512 /* When INT14_OK_STRICT is false, a secondary reload is needed
10513 to adjust the displacement of SImode and DImode floating point
10514 instructions but this may fail when the register also needs
10515 reloading. So, we return false when STRICT is true. We
10516 also reject long displacements for float mode addresses since
10517 the majority of accesses will use floating point instructions
10518 that don't support 14-bit offsets. */
10519 if (!INT14_OK_STRICT
10520 && (strict || !(reload_in_progress || reload_completed))
10521 && mode != QImode
10522 && mode != HImode)
10523 return false;
10525 return base14_operand (index, mode);
10528 if (!TARGET_DISABLE_INDEXING
10529 /* Only accept the "canonical" INDEX+BASE operand order
10530 on targets with non-equivalent space registers. */
10531 && (TARGET_NO_SPACE_REGS
10532 ? REG_P (index)
10533 : (base == XEXP (x, 1) && REG_P (index)
10534 && (reload_completed
10535 || (reload_in_progress && HARD_REGISTER_P (base))
10536 || REG_POINTER (base))
10537 && (reload_completed
10538 || (reload_in_progress && HARD_REGISTER_P (index))
10539 || !REG_POINTER (index))))
10540 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10541 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10542 : REG_OK_FOR_INDEX_P (index))
10543 && borx_reg_operand (base, Pmode)
10544 && borx_reg_operand (index, Pmode))
10545 return true;
10547 if (!TARGET_DISABLE_INDEXING
10548 && GET_CODE (index) == MULT
10549 /* Only accept base operands with the REG_POINTER flag prior to
10550 reload on targets with non-equivalent space registers. */
10551 && (TARGET_NO_SPACE_REGS
10552 || (base == XEXP (x, 1)
10553 && (reload_completed
10554 || (reload_in_progress && HARD_REGISTER_P (base))
10555 || REG_POINTER (base))))
10556 && REG_P (XEXP (index, 0))
10557 && GET_MODE (XEXP (index, 0)) == Pmode
10558 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10559 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10560 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10561 && GET_CODE (XEXP (index, 1)) == CONST_INT
10562 && INTVAL (XEXP (index, 1))
10563 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10564 && borx_reg_operand (base, Pmode))
10565 return true;
10567 return false;
10570 if (GET_CODE (x) == LO_SUM)
10572 rtx y = XEXP (x, 0);
10574 if (GET_CODE (y) == SUBREG)
10575 y = SUBREG_REG (y);
10577 if (REG_P (y)
10578 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10579 : REG_OK_FOR_BASE_P (y)))
10581 /* Needed for -fPIC */
10582 if (mode == Pmode
10583 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10584 return true;
10586 if (!INT14_OK_STRICT
10587 && (strict || !(reload_in_progress || reload_completed))
10588 && mode != QImode
10589 && mode != HImode)
10590 return false;
10592 if (CONSTANT_P (XEXP (x, 1)))
10593 return true;
10595 return false;
10598 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10599 return true;
10601 return false;
10604 /* Look for machine dependent ways to make the invalid address AD a
10605 valid address.
10607 For the PA, transform:
10609 memory(X + <large int>)
10611 into:
10613 if (<large int> & mask) >= 16
10614 Y = (<large int> & ~mask) + mask + 1 Round up.
10615 else
10616 Y = (<large int> & ~mask) Round down.
10617 Z = X + Y
10618 memory (Z + (<large int> - Y));
10620 This makes reload inheritance and reload_cse work better since Z
10621 can be reused.
10623 There may be more opportunities to improve code with this hook. */
10625 rtx
10626 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10627 int opnum, int type,
10628 int ind_levels ATTRIBUTE_UNUSED)
10630 long offset, newoffset, mask;
10631 rtx new_rtx, temp = NULL_RTX;
10633 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10634 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10636 if (optimize && GET_CODE (ad) == PLUS)
10637 temp = simplify_binary_operation (PLUS, Pmode,
10638 XEXP (ad, 0), XEXP (ad, 1));
10640 new_rtx = temp ? temp : ad;
10642 if (optimize
10643 && GET_CODE (new_rtx) == PLUS
10644 && GET_CODE (XEXP (new_rtx, 0)) == REG
10645 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10647 offset = INTVAL (XEXP ((new_rtx), 1));
10649 /* Choose rounding direction. Round up if we are >= halfway. */
10650 if ((offset & mask) >= ((mask + 1) / 2))
10651 newoffset = (offset & ~mask) + mask + 1;
10652 else
10653 newoffset = offset & ~mask;
10655 /* Ensure that long displacements are aligned. */
10656 if (mask == 0x3fff
10657 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10658 || (TARGET_64BIT && (mode) == DImode)))
10659 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10661 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10663 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10664 GEN_INT (newoffset));
10665 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10666 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10667 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10668 opnum, (enum reload_type) type);
10669 return ad;
10673 return NULL_RTX;
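/* A worked instance of the transformation above for an SFmode access,
   where mask == 0x1f: for memory (X + 0x93), offset & mask == 0x13,
   which is at least half of 0x20, so the offset rounds up to
   newoffset == 0xa0.  Reload then materializes Z = X + 0xa0 and the
   access becomes memory (Z - 0xd), whose displacement fits the 5-bit
   field.  The rounding itself, as a standalone C sketch (hypothetical
   helper name):

     static long
     round_reload_offset (long offset, long mask)
     {
       if ((offset & mask) >= (mask + 1) / 2)
         return (offset & ~mask) + mask + 1;
       return offset & ~mask;
     }
*/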
10676 /* Output address vector. */
10678 void
10679 pa_output_addr_vec (rtx lab, rtx body)
10681 int idx, vlen = XVECLEN (body, 0);
10683 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10684 if (TARGET_GAS)
10685 fputs ("\t.begin_brtab\n", asm_out_file);
10686 for (idx = 0; idx < vlen; idx++)
10688 ASM_OUTPUT_ADDR_VEC_ELT
10689 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10691 if (TARGET_GAS)
10692 fputs ("\t.end_brtab\n", asm_out_file);
10695 /* Output address difference vector. */
10697 void
10698 pa_output_addr_diff_vec (rtx lab, rtx body)
10700 rtx base = XEXP (XEXP (body, 0), 0);
10701 int idx, vlen = XVECLEN (body, 1);
10703 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10704 if (TARGET_GAS)
10705 fputs ("\t.begin_brtab\n", asm_out_file);
10706 for (idx = 0; idx < vlen; idx++)
10708 ASM_OUTPUT_ADDR_DIFF_ELT
10709 (asm_out_file,
10710 body,
10711 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10712 CODE_LABEL_NUMBER (base));
10714 if (TARGET_GAS)
10715 fputs ("\t.end_brtab\n", asm_out_file);
10718 /* This is a helper function for the other atomic operations. This function
10719 emits a loop that contains SEQ that iterates until a compare-and-swap
10720 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10721 a set of instructions that takes a value from OLD_REG as an input and
10722 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10723 set to the current contents of MEM. After SEQ, a compare-and-swap will
10724 attempt to update MEM with NEW_REG. The function returns true when the
10725 loop was generated successfully. */
10727 static bool
10728 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10730 machine_mode mode = GET_MODE (mem);
10731 rtx_code_label *label;
10732 rtx cmp_reg, success, oldval;
10734 /* The loop we want to generate looks like
10736 cmp_reg = mem;
10737 label:
10738 old_reg = cmp_reg;
10739 seq;
10740 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
10741 if (!success)
10742 goto label;
10744 Note that we only do the plain load from memory once. Subsequent
10745 iterations use the value loaded by the compare-and-swap pattern. */
10747 label = gen_label_rtx ();
10748 cmp_reg = gen_reg_rtx (mode);
10750 emit_move_insn (cmp_reg, mem);
10751 emit_label (label);
10752 emit_move_insn (old_reg, cmp_reg);
10753 if (seq)
10754 emit_insn (seq);
10756 success = NULL_RTX;
10757 oldval = cmp_reg;
10758 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10759 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10760 MEMMODEL_RELAXED))
10761 return false;
10763 if (oldval != cmp_reg)
10764 emit_move_insn (cmp_reg, oldval);
10766 /* Mark this jump predicted not taken. */
10767 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10768 GET_MODE (success), 1, label,
10769 profile_probability::guessed_never ());
10770 return true;
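/* The same retry structure expressed with portable C11 atomics, as a
   reference model only (hypothetical helper, not the RTL emitted
   above):

     #include <stdatomic.h>

     static int
     atomic_apply_model (atomic_int *mem, int (*seq) (int))
     {
       int old = atomic_load (mem);
       while (!atomic_compare_exchange_weak (mem, &old, seq (old)))
         ;
       return old;
     }

   As in the RTL loop, the plain load happens once; a failed
   compare-and-swap refreshes old, just as cmp_reg is refreshed from
   oldval above.  */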
10773 /* This function tries to implement an atomic exchange operation using a
10774 compare_and_swap loop. VAL is written to *MEM. The previous contents of
10775 *MEM are returned, using TARGET if possible. No memory model is required
10776 since a compare_and_swap loop is seq-cst. */
10778 rtx
10779 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10781 machine_mode mode = GET_MODE (mem);
10783 if (can_compare_and_swap_p (mode, true))
10785 if (!target || !register_operand (target, mode))
10786 target = gen_reg_rtx (mode);
10787 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10788 return target;
10791 return NULL_RTX;
10794 /* Implement TARGET_CALLEE_COPIES. The callee is responsible for copying
10795 arguments passed by hidden reference in the 32-bit HP runtime. Users
10796 can override this behavior for better compatibility with OpenMP at the
10797 risk of library incompatibilities. Arguments are always passed by value
10798 in the 64-bit HP runtime. */
10800 static bool
10801 pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
10802 machine_mode mode ATTRIBUTE_UNUSED,
10803 const_tree type ATTRIBUTE_UNUSED,
10804 bool named ATTRIBUTE_UNUSED)
10806 return !TARGET_CALLER_COPIES;
10809 /* Implement TARGET_HARD_REGNO_NREGS. */
10811 static unsigned int
10812 pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
10814 return PA_HARD_REGNO_NREGS (regno, mode);
10817 /* Implement TARGET_HARD_REGNO_MODE_OK. */
10819 static bool
10820 pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10822 return PA_HARD_REGNO_MODE_OK (regno, mode);
10825 /* Implement TARGET_STARTING_FRAME_OFFSET.
10827 On the 32-bit ports, we reserve one slot for the previous frame
10828 pointer and one fill slot. The fill slot is for compatibility
10829 with HP compiled programs. On the 64-bit ports, we reserve one
10830 slot for the previous frame pointer. */
10832 static HOST_WIDE_INT
10833 pa_starting_frame_offset (void)
10835 return 8;
10838 /* Figure out the size in words of the function argument. The size
10839 returned by this function should always be greater than zero because
10840 we pass variable and zero-sized objects by reference. */
10842 HOST_WIDE_INT
10843 pa_function_arg_size (machine_mode mode, const_tree type)
10845 if (mode != BLKmode)
10846 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
10847 return CEIL (int_size_in_bytes (type), UNITS_PER_WORD);
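/* e.g. on the 32-bit port DFmode occupies CEIL (8, 4) == 2 words and a
   10-byte BLKmode aggregate CEIL (10, 4) == 3 words; the result is
   never zero because zero-sized and variable-sized objects are passed
   by reference.  */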
10850 #include "gt-pa.h"