/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-attr.h"
31 #include "flags.h"
32 #include "tree.h"
33 #include "stor-layout.h"
34 #include "stringpool.h"
35 #include "varasm.h"
36 #include "calls.h"
37 #include "output.h"
38 #include "dbxout.h"
39 #include "except.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "reload.h"
43 #include "function.h"
44 #include "diagnostic-core.h"
45 #include "ggc.h"
46 #include "recog.h"
47 #include "predict.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "common/common-target.h"
51 #include "target-def.h"
52 #include "langhooks.h"
53 #include "df.h"
54 #include "opts.h"
55 #include "builtins.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        enum machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
                                                   enum machine_mode, int *,
                                                   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
          && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
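
/* As an illustration (a sketch, not taken from the original sources):
   when HPUX_LONG_DOUBLE_LIBRARY holds, the builtins registered above
   can be used from C roughly like so:

       __float128 inf = __builtin_infq ();
       __float128 mag = __builtin_fabsq (x);
       __float128 sgn = __builtin_copysignq (x, y);

   The INFQ and HUGE_VALQ builtins are expanded inline by loading a
   TFmode infinity from the constant pool, while FABSQ and COPYSIGNQ
   expand as ordinary calls to _U_Qfabs and _U_Qfcopysign.  */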
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
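
/* A few illustrative values (examples, not from the original sources):
   5 and -42 satisfy VAL_14_BITS_P and can be loaded with a single ldo;
   0x12345800 has its low 11 bits clear and can be loaded with ldil;
   0x00f80000 is a shifted run of ones and can be generated with zdepi.
   A constant that fits none of these needs a two-instruction sequence,
   typically ldil followed by ldo.  */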
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
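
/* The mask above keeps bits 31..63 and bits 0..10 of IVAL; ldil can
   supply only bits 11..31, so the kept bits must either be all zero
   (a non-negative value) or have bits 31..63 all one with bits 0..10
   zero (a sign-extended negative value).  For example (illustrative
   values): 0x12345800 passes, while 0x12345801 fails because its low
   bits are set, and the 64-bit value 0x0000000080000000 fails because
   it would change sign when truncated to 32 bits.  */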
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
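
/* Informally, why this works: LSB_MASK isolates the lowest set bit of
   X.  A value zdepi can generate is a shifted field whose bits above
   the low four are a contiguous run of ones (the sign extension), so
   adding LSB_MASK to (X >> 4) lets the carry ripple through that run
   and collapse it to a single bit, which the power-of-two test then
   accepts.  Worked examples (illustrative): X = 0x1f0 gives
   LSB_MASK = 0x10 and T = (0x1f + 0x10) & ~0xf = 0x20, a power of
   two, so it is accepted; X = 0x101 gives T = 0x11, which is not, so
   it is rejected.  */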
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
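
/* Illustrative examples (with constants sign-extended to
   HOST_WIDE_INT, as CONST_INT values are): pa_and_mask_p accepts
   0x0000ffff, 0xffff0000 and 0xff0000ff, whose complement in each
   case is a single contiguous run of ones, but rejects 0x00ff00ff,
   whose complement has two separate runs.  For pa_ior_mask_p the mask
   itself must be a single run of ones: 0x7ffe is accepted, 0x5ffe is
   not.  */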
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1    Round up.
        else
          Y = (<large int> & ~mask)               Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
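
/* A worked instance of the transformation above (illustrative values):
   for a MODE_INT reference to (X + 0x12345), MASK is 0x3fff, so
   (0x12345 & 0x3fff) = 0x2345 >= 0x2000 and we round up to
   Y = 0x14000.  We compute Z = X + 0x14000 and rewrite the address as
   (Z + (0x12345 - 0x14000)) = (Z - 0x1cbb), whose displacement fits
   in 14 bits.  Nearby references such as (X + 0x12349) resolve to the
   same Z, which is what lets CSE share the base computation.  */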
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (pa_tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */
  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
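
/* For instance (illustrative values), a DImode multiply on a 32-bit
   target has FACTOR = 2, so the MULT case above yields
   2 * 2 * COSTS_N_INSNS (8) when the FPU multiply can be used and
   2 * 2 * COSTS_N_INSNS (20) otherwise, while an SFmode add costs a
   flat COSTS_N_INSNS (3).  */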
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && (MEM_P (operand1)
          || (GET_CODE (operand1) == SUBREG
              && MEM_P (XEXP (operand1, 0))))
      && !floating_point_store_memory_operand (operand1, mode))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (reg_plus_base_memory_operand (operand1, mode)
          && !(TARGET_PA_20
               && !TARGET_ELF32
               && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && (MEM_P (operand0)
               || (GET_CODE (operand0) == SUBREG
                   && MEM_P (XEXP (operand0, 0))))
           && !floating_point_store_memory_operand (operand0, mode))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (reg_plus_base_memory_operand (operand0, mode)
          && !(TARGET_PA_20
               && !TARGET_ELF32
               && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand0, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.  For the most part,
     this is only necessary for SImode and DImode.

     Use scratch_reg to hold the address of the memory location.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      if (operand1 == CONST0_RTX (mode))
        {
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
1819 /* Handle the most common case: storing into a register. */
1820 else if (register_operand (operand0, mode))
1822 /* Legitimize TLS symbol references. This happens for references
1823 that aren't a legitimate constant. */
1824 if (PA_SYMBOL_REF_TLS_P (operand1))
1825 operand1 = legitimize_tls_address (operand1);
1827 if (register_operand (operand1, mode)
1828 || (GET_CODE (operand1) == CONST_INT
1829 && pa_cint_ok_for_move (INTVAL (operand1)))
1830 || (operand1 == CONST0_RTX (mode))
1831 || (GET_CODE (operand1) == HIGH
1832 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1833 /* Only `general_operands' can come here, so MEM is ok. */
1834 || GET_CODE (operand1) == MEM)
1836 /* Various sets are created during RTL generation which don't
1837 have the REG_POINTER flag correctly set. After the CSE pass,
1838 instruction recognition can fail if we don't consistently
1839 set this flag when performing register copies. This should
1840 also improve the opportunities for creating insns that use
1841 unscaled indexing. */
1842 if (REG_P (operand0) && REG_P (operand1))
1844 if (REG_POINTER (operand1)
1845 && !REG_POINTER (operand0)
1846 && !HARD_REGISTER_P (operand0))
1847 copy_reg_pointer (operand0, operand1);
1850 /* When MEMs are broken out, the REG_POINTER flag doesn't
1851 get set. In some cases, we can set the REG_POINTER flag
1852 from the declaration for the MEM. */
1853 if (REG_P (operand0)
1854 && GET_CODE (operand1) == MEM
1855 && !REG_POINTER (operand0))
1857 tree decl = MEM_EXPR (operand1);
1859 /* Set the register pointer flag and register alignment
1860 if the declaration for this memory reference is a
1861 pointer type. */
1862 if (decl)
1864 tree type;
1866 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1867 tree operand 1. */
1868 if (TREE_CODE (decl) == COMPONENT_REF)
1869 decl = TREE_OPERAND (decl, 1);
1871 type = TREE_TYPE (decl);
1872 type = strip_array_types (type);
1874 if (POINTER_TYPE_P (type))
1876 int align;
1878 type = TREE_TYPE (type);
1879 /* Using TYPE_ALIGN_OK is rather conservative as
1880 only the Ada front end actually sets it. */
1881 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1882 : BITS_PER_UNIT);
1883 mark_reg_pointer (operand0, align);
1888 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1889 return 1;
1892 else if (GET_CODE (operand0) == MEM)
1894 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1895 && !(reload_in_progress || reload_completed))
1897 rtx temp = gen_reg_rtx (DFmode);
1899 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1900 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1901 return 1;
1903 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1905 /* Run this case quickly. */
1906 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1907 return 1;
1909 if (! (reload_in_progress || reload_completed))
1911 operands[0] = validize_mem (operand0);
1912 operands[1] = operand1 = force_reg (mode, operand1);
1916 /* Simplify the source if we need to.
1917 Note we do have to handle function labels here, even though we do
1918 not consider them legitimate constants. Loop optimizations can
1919 call the emit_move_xxx with one as a source. */
1920 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1921 || (GET_CODE (operand1) == HIGH
1922 && symbolic_operand (XEXP (operand1, 0), mode))
1923 || function_label_operand (operand1, VOIDmode)
1924 || pa_tls_referenced_p (operand1))
1926 int ishighonly = 0;
1928 if (GET_CODE (operand1) == HIGH)
1930 ishighonly = 1;
1931 operand1 = XEXP (operand1, 0);
1933 if (symbolic_operand (operand1, mode))
1935 /* Argh. The assembler and linker can't handle arithmetic
1936 involving plabels.
1938 So we force the plabel into memory, load operand0 from
1939 the memory location, then add in the constant part. */
1940 if ((GET_CODE (operand1) == CONST
1941 && GET_CODE (XEXP (operand1, 0)) == PLUS
1942 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1943 VOIDmode))
1944 || function_label_operand (operand1, VOIDmode))
1946 rtx temp, const_part;
1948 /* Figure out what (if any) scratch register to use. */
1949 if (reload_in_progress || reload_completed)
1951 scratch_reg = scratch_reg ? scratch_reg : operand0;
1952 /* SCRATCH_REG will hold an address and maybe the actual
1953 data. We want it in WORD_MODE regardless of what mode it
1954 was originally given to us. */
1955 scratch_reg = force_mode (word_mode, scratch_reg);
1957 else if (flag_pic)
1958 scratch_reg = gen_reg_rtx (Pmode);
1960 if (GET_CODE (operand1) == CONST)
1962 /* Save away the constant part of the expression. */
1963 const_part = XEXP (XEXP (operand1, 0), 1);
1964 gcc_assert (GET_CODE (const_part) == CONST_INT);
1966 /* Force the function label into memory. */
1967 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1969 else
1971 /* No constant part. */
1972 const_part = NULL_RTX;
1974 /* Force the function label into memory. */
1975 temp = force_const_mem (mode, operand1);
1979 /* Get the address of the memory location. PIC-ify it if
1980 necessary. */
1981 temp = XEXP (temp, 0);
1982 if (flag_pic)
1983 temp = legitimize_pic_address (temp, mode, scratch_reg);
1985 /* Put the address of the memory location into our destination
1986 register. */
1987 operands[1] = temp;
1988 pa_emit_move_sequence (operands, mode, scratch_reg);
1990 /* Now load from the memory location into our destination
1991 register. */
1992 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1993 pa_emit_move_sequence (operands, mode, scratch_reg);
1995 /* And add back in the constant part. */
1996 if (const_part != NULL_RTX)
1997 expand_inc (operand0, const_part);
1999 return 1;
2002 if (flag_pic)
2004 rtx temp;
2006 if (reload_in_progress || reload_completed)
2008 temp = scratch_reg ? scratch_reg : operand0;
2009 /* TEMP will hold an address and maybe the actual
2010 data. We want it in WORD_MODE regardless of what mode it
2011 was originally given to us. */
2012 temp = force_mode (word_mode, temp);
2014 else
2015 temp = gen_reg_rtx (Pmode);
2017 /* (const (plus (symbol) (const_int))) must be forced to
2018 memory during/after reload if the const_int will not fit
2019 in 14 bits. */
2020 if (GET_CODE (operand1) == CONST
2021 && GET_CODE (XEXP (operand1, 0)) == PLUS
2022 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2023 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2024 && (reload_completed || reload_in_progress)
2025 && flag_pic)
2027 rtx const_mem = force_const_mem (mode, operand1);
2028 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2029 mode, temp);
2030 operands[1] = replace_equiv_address (const_mem, operands[1]);
2031 pa_emit_move_sequence (operands, mode, temp);
2033 else
2035 operands[1] = legitimize_pic_address (operand1, mode, temp);
2036 if (REG_P (operand0) && REG_P (operands[1]))
2037 copy_reg_pointer (operand0, operands[1]);
2038 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2041 /* On the HPPA, references to data space are supposed to use dp,
2042 register 27, but showing it in the RTL inhibits various cse
2043 and loop optimizations. */
2044 else
2046 rtx temp, set;
2048 if (reload_in_progress || reload_completed)
2050 temp = scratch_reg ? scratch_reg : operand0;
2051 /* TEMP will hold an address and maybe the actual
2052 data. We want it in WORD_MODE regardless of what mode it
2053 was originally given to us. */
2054 temp = force_mode (word_mode, temp);
2056 else
2057 temp = gen_reg_rtx (mode);
2059 /* Loading a SYMBOL_REF into a register makes that register
2060 safe to be used as the base in an indexed address.
2062 Don't mark hard registers though. That loses. */
2063 if (GET_CODE (operand0) == REG
2064 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2065 mark_reg_pointer (operand0, BITS_PER_UNIT);
2066 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2067 mark_reg_pointer (temp, BITS_PER_UNIT);
2069 if (ishighonly)
2070 set = gen_rtx_SET (mode, operand0, temp);
2071 else
2072 set = gen_rtx_SET (VOIDmode,
2073 operand0,
2074 gen_rtx_LO_SUM (mode, temp, operand1));
2076 emit_insn (gen_rtx_SET (VOIDmode,
2077 temp,
2078 gen_rtx_HIGH (mode, operand1)));
2079 emit_insn (set);
2082 return 1;
2084 else if (pa_tls_referenced_p (operand1))
2086 rtx tmp = operand1;
2087 rtx addend = NULL;
2089 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2091 addend = XEXP (XEXP (tmp, 0), 1);
2092 tmp = XEXP (XEXP (tmp, 0), 0);
2095 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2096 tmp = legitimize_tls_address (tmp);
2097 if (addend)
2099 tmp = gen_rtx_PLUS (mode, tmp, addend);
2100 tmp = force_operand (tmp, operands[0]);
2102 operands[1] = tmp;
2104 else if (GET_CODE (operand1) != CONST_INT
2105 || !pa_cint_ok_for_move (INTVAL (operand1)))
2107 rtx insn, temp;
2108 rtx op1 = operand1;
2109 HOST_WIDE_INT value = 0;
2110 HOST_WIDE_INT insv = 0;
2111 int insert = 0;
2113 if (GET_CODE (operand1) == CONST_INT)
2114 value = INTVAL (operand1);
2116 if (TARGET_64BIT
2117 && GET_CODE (operand1) == CONST_INT
2118 && HOST_BITS_PER_WIDE_INT > 32
2119 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2121 HOST_WIDE_INT nval;
2123 /* Extract the low order 32 bits of the value and sign extend.
2124 If the new value is the same as the original value, we can
2125 use the original value as-is. If the new value is
2126 different, we use it and insert the most-significant 32-bits
2127 of the original value into the final result. */
2128 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2129 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2130 if (value != nval)
2132 #if HOST_BITS_PER_WIDE_INT > 32
2133 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2134 #endif
2135 insert = 1;
2136 value = nval;
2137 operand1 = GEN_INT (nval);
2141 if (reload_in_progress || reload_completed)
2142 temp = scratch_reg ? scratch_reg : operand0;
2143 else
2144 temp = gen_reg_rtx (mode);
2146 /* We don't directly split DImode constants on 32-bit targets
2147 because PLUS uses an 11-bit immediate and the insn sequence
2148 generated is not as efficient as the one using HIGH/LO_SUM. */
2149 if (GET_CODE (operand1) == CONST_INT
2150 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2151 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2152 && !insert)
2154 /* Directly break constant into high and low parts. This
2155 provides better optimization opportunities because various
2156 passes recognize constants split with PLUS but not LO_SUM.
2157 We use a 14-bit signed low part except when the addition
2158 of 0x4000 to the high part might change the sign of the
2159 high part. */
2160 HOST_WIDE_INT low = value & 0x3fff;
2161 HOST_WIDE_INT high = value & ~ 0x3fff;
2163 if (low >= 0x2000)
2165 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2166 high += 0x2000;
2167 else
2168 high += 0x4000;
2171 low = value - high;
2173 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2174 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2176 else
2178 emit_insn (gen_rtx_SET (VOIDmode, temp,
2179 gen_rtx_HIGH (mode, operand1)));
2180 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2183 insn = emit_move_insn (operands[0], operands[1]);
2185 /* Now insert the most significant 32 bits of the value
2186 into the register. When we don't have a second register
2187 available, it could take up to nine instructions to load
2188 a 64-bit integer constant. Prior to reload, we force
2189 constants that would take more than three instructions
2190 to load to the constant pool. During and after reload,
2191 we have to handle all possible values. */
2192 if (insert)
2194 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2195 register and the value to be inserted is outside the
2196 range that can be loaded with three depdi instructions. */
2197 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2199 operand1 = GEN_INT (insv);
2201 emit_insn (gen_rtx_SET (VOIDmode, temp,
2202 gen_rtx_HIGH (mode, operand1)));
2203 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2204 if (mode == DImode)
2205 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2206 const0_rtx, temp));
2207 else
2208 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2209 const0_rtx, temp));
2211 else
2213 int len = 5, pos = 27;
2215 /* Insert the bits using the depdi instruction. */
2216 while (pos >= 0)
2218 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2219 HOST_WIDE_INT sign = v5 < 0;
2221 /* Left extend the insertion. */
2222 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2223 while (pos > 0 && (insv & 1) == sign)
2225 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2226 len += 1;
2227 pos -= 1;
2230 if (mode == DImode)
2231 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2232 GEN_INT (pos), GEN_INT (v5)));
2233 else
2234 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2235 GEN_INT (pos), GEN_INT (v5)));
2237 len = pos > 0 && pos < 5 ? pos : 5;
2238 pos -= len;
2243 set_unique_reg_note (insn, REG_EQUAL, op1);
2245 return 1;
2248 /* Now have insn-emit do whatever it normally does. */
2249 return 0;
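/* Editorial sketch (not part of the port): the 14-bit low / high
   constant split performed above, with the 0x7fffc000 and HImode
   sign-overflow special cases omitted for brevity.  */
#if 0
static void
split_const_example (long value, long *high_out, long *low_out)
{
  long low = value & 0x3fff;	/* candidate signed 14-bit low part */
  long high = value & ~0x3fff;

  /* If LOW would be negative as a signed 14-bit field, carry 0x4000
     into the high part; HIGH + LOW still equals VALUE.  */
  if (low >= 0x2000)
    high += 0x4000;

  *high_out = high;
  *low_out = value - high;	/* now in [-0x2000, 0x1fff] */
}
#endif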
2252 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2253 it will need a link/runtime reloc). */
2255 int
2256 pa_reloc_needed (tree exp)
2258 int reloc = 0;
2260 switch (TREE_CODE (exp))
2262 case ADDR_EXPR:
2263 return 1;
2265 case POINTER_PLUS_EXPR:
2266 case PLUS_EXPR:
2267 case MINUS_EXPR:
2268 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2269 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2270 break;
2272 CASE_CONVERT:
2273 case NON_LVALUE_EXPR:
2274 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2275 break;
2277 case CONSTRUCTOR:
2279 tree value;
2280 unsigned HOST_WIDE_INT ix;
2282 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2283 if (value)
2284 reloc |= pa_reloc_needed (value);
2286 break;
2288 case ERROR_MARK:
2289 break;
2291 default:
2292 break;
2294 return reloc;
2298 /* Return the best assembler insn template
2299 for moving operands[1] into operands[0] as a fullword. */
2300 const char *
2301 pa_singlemove_string (rtx *operands)
2303 HOST_WIDE_INT intval;
2305 if (GET_CODE (operands[0]) == MEM)
2306 return "stw %r1,%0";
2307 if (GET_CODE (operands[1]) == MEM)
2308 return "ldw %1,%0";
2309 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2311 long i;
2312 REAL_VALUE_TYPE d;
2314 gcc_assert (GET_MODE (operands[1]) == SFmode);
2316 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2317 bit pattern. */
2318 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2319 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2321 operands[1] = GEN_INT (i);
2322 /* Fall through to CONST_INT case. */
2324 if (GET_CODE (operands[1]) == CONST_INT)
2326 intval = INTVAL (operands[1]);
2328 if (VAL_14_BITS_P (intval))
2329 return "ldi %1,%0";
2330 else if ((intval & 0x7ff) == 0)
2331 return "ldil L'%1,%0";
2332 else if (pa_zdepi_cint_p (intval))
2333 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2334 else
2335 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2337 return "copy %1,%0";
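/* Editorial sketch: the immediate classification done by
   pa_singlemove_string, as a stand-alone decision chain.  The range
   test mirrors VAL_14_BITS_P; pa_zdepi_cint_p is the port's real
   predicate for constants a single zdepi/depwi,z can materialize.  */
#if 0
static const char *
imm_load_strategy (long ival)
{
  if (ival >= -0x2000 && ival < 0x2000)	/* signed 14 bits: one ldi */
    return "ldi";
  if ((ival & 0x7ff) == 0)		/* low 11 bits clear: one ldil */
    return "ldil";
  if (pa_zdepi_cint_p (ival))		/* depositable bit field */
    return "zdepi";
  return "ldil+ldo";			/* general two-insn sequence */
}
#endif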
2341 /* Compute position (in OP[1]) and width (in OP[2])
2342 useful for copying IMM to a register using the zdepi
2343 instructions. Store the immediate value to insert in OP[0]. */
2344 static void
2345 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2347 int lsb, len;
2349 /* Find the least significant set bit in IMM. */
2350 for (lsb = 0; lsb < 32; lsb++)
2352 if ((imm & 1) != 0)
2353 break;
2354 imm >>= 1;
2357 /* Choose variants based on *sign* of the 5-bit field. */
2358 if ((imm & 0x10) == 0)
2359 len = (lsb <= 28) ? 4 : 32 - lsb;
2360 else
2362 /* Find the width of the bitstring in IMM. */
2363 for (len = 5; len < 32 - lsb; len++)
2365 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2366 break;
2369 /* Sign extend IMM as a 5-bit value. */
2370 imm = (imm & 0xf) - 0x10;
2373 op[0] = imm;
2374 op[1] = 31 - lsb;
2375 op[2] = len;
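/* Editorial worked example for the routine above: IMM = 0x3f8 (bits
   3..9 set).  The first loop finds lsb = 3 and shifts IMM down to
   0x7f.  Bit 4 of that field is set, so the width loop extends LEN
   to 7 and IMM sign-extends to -1, giving OP = {-1, 28, 7}: a zdepi
   of -1 over a 7-bit field at position 31 - 3 = 28 recreates 0x3f8.  */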
2378 /* Compute position (in OP[1]) and width (in OP[2])
2379 useful for copying IMM to a register using the depdi,z
2380 instructions. Store the immediate value to insert in OP[0]. */
2382 static void
2383 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2385 int lsb, len, maxlen;
2387 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2389 /* Find the least significant set bit in IMM. */
2390 for (lsb = 0; lsb < maxlen; lsb++)
2392 if ((imm & 1) != 0)
2393 break;
2394 imm >>= 1;
2397 /* Choose variants based on *sign* of the 5-bit field. */
2398 if ((imm & 0x10) == 0)
2399 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2400 else
2402 /* Find the width of the bitstring in IMM. */
2403 for (len = 5; len < maxlen - lsb; len++)
2405 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2406 break;
2409 /* Extend length if host is narrow and IMM is negative. */
2410 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2411 len += 32;
2413 /* Sign extend IMM as a 5-bit value. */
2414 imm = (imm & 0xf) - 0x10;
2417 op[0] = imm;
2418 op[1] = 63 - lsb;
2419 op[2] = len;
2422 /* Output assembler code to perform a doubleword move insn
2423 with operands OPERANDS. */
2425 const char *
2426 pa_output_move_double (rtx *operands)
2428 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2429 rtx latehalf[2];
2430 rtx addreg0 = 0, addreg1 = 0;
2432 /* First classify both operands. */
2434 if (REG_P (operands[0]))
2435 optype0 = REGOP;
2436 else if (offsettable_memref_p (operands[0]))
2437 optype0 = OFFSOP;
2438 else if (GET_CODE (operands[0]) == MEM)
2439 optype0 = MEMOP;
2440 else
2441 optype0 = RNDOP;
2443 if (REG_P (operands[1]))
2444 optype1 = REGOP;
2445 else if (CONSTANT_P (operands[1]))
2446 optype1 = CNSTOP;
2447 else if (offsettable_memref_p (operands[1]))
2448 optype1 = OFFSOP;
2449 else if (GET_CODE (operands[1]) == MEM)
2450 optype1 = MEMOP;
2451 else
2452 optype1 = RNDOP;
2454 /* Check for the cases that the operand constraints are not
2455 supposed to allow to happen. */
2456 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2458 /* Handle copies between general and floating registers. */
2460 if (optype0 == REGOP && optype1 == REGOP
2461 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2463 if (FP_REG_P (operands[0]))
2465 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2466 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2467 return "{fldds|fldd} -16(%%sp),%0";
2469 else
2471 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2472 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2473 return "{ldws|ldw} -12(%%sp),%R0";
2477 /* Handle auto decrementing and incrementing loads and stores
2478 specifically, since the structure of the function doesn't work
2479 for them without major modification. Do this better once the port
2480 knows about the general inc/dec addressing of the PA.
2481 (This was written by tege. Chide him if it doesn't work.) */
2483 if (optype0 == MEMOP)
2485 /* We have to output the address syntax ourselves, since print_operand
2486 doesn't deal with the addresses we want to use. Fix this later. */
2488 rtx addr = XEXP (operands[0], 0);
2489 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2491 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2493 operands[0] = XEXP (addr, 0);
2494 gcc_assert (GET_CODE (operands[1]) == REG
2495 && GET_CODE (operands[0]) == REG);
2497 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2499 /* No overlap between high target register and address
2500 register. (We do this in a non-obvious way to
2501 save a register file writeback) */
2502 if (GET_CODE (addr) == POST_INC)
2503 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2504 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2506 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2508 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2510 operands[0] = XEXP (addr, 0);
2511 gcc_assert (GET_CODE (operands[1]) == REG
2512 && GET_CODE (operands[0]) == REG);
2514 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2515 /* No overlap between high target register and address
2516 register. (We do this in a non-obvious way to save a
2517 register file writeback) */
2518 if (GET_CODE (addr) == PRE_INC)
2519 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2520 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2523 if (optype1 == MEMOP)
2525 /* We have to output the address syntax ourselves, since print_operand
2526 doesn't deal with the addresses we want to use. Fix this later. */
2528 rtx addr = XEXP (operands[1], 0);
2529 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2531 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2533 operands[1] = XEXP (addr, 0);
2534 gcc_assert (GET_CODE (operands[0]) == REG
2535 && GET_CODE (operands[1]) == REG);
2537 if (!reg_overlap_mentioned_p (high_reg, addr))
2539 /* No overlap between high target register and address
2540 register. (We do this in a non-obvious way to
2541 save a register file writeback) */
2542 if (GET_CODE (addr) == POST_INC)
2543 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2544 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2546 else
2548 /* This is an undefined situation. We should load into the
2549 address register *and* update that register. Probably
2550 we don't need to handle this at all. */
2551 if (GET_CODE (addr) == POST_INC)
2552 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2553 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2556 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2558 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2560 operands[1] = XEXP (addr, 0);
2561 gcc_assert (GET_CODE (operands[0]) == REG
2562 && GET_CODE (operands[1]) == REG);
2564 if (!reg_overlap_mentioned_p (high_reg, addr))
2566 /* No overlap between high target register and address
2567 register. (We do this in a non-obvious way to
2568 save a register file writeback) */
2569 if (GET_CODE (addr) == PRE_INC)
2570 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2571 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2573 else
2575 /* This is an undefined situation. We should load into the
2576 address register *and* update that register. Probably
2577 we don't need to handle this at all. */
2578 if (GET_CODE (addr) == PRE_INC)
2579 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2580 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2583 else if (GET_CODE (addr) == PLUS
2584 && GET_CODE (XEXP (addr, 0)) == MULT)
2586 rtx xoperands[4];
2587 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2589 if (!reg_overlap_mentioned_p (high_reg, addr))
2591 xoperands[0] = high_reg;
2592 xoperands[1] = XEXP (addr, 1);
2593 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2594 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2595 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2596 xoperands);
2597 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2599 else
2601 xoperands[0] = high_reg;
2602 xoperands[1] = XEXP (addr, 1);
2603 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2604 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2605 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2606 xoperands);
2607 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2612 /* If an operand is an unoffsettable memory ref, find a register
2613 we can increment temporarily to make it refer to the second word. */
2615 if (optype0 == MEMOP)
2616 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2618 if (optype1 == MEMOP)
2619 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2621 /* Ok, we can do one word at a time.
2622 Normally we do the low-numbered word first.
2624 In either case, set up in LATEHALF the operands to use
2625 for the high-numbered word and in some cases alter the
2626 operands in OPERANDS to be suitable for the low-numbered word. */
2628 if (optype0 == REGOP)
2629 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2630 else if (optype0 == OFFSOP)
2631 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2632 else
2633 latehalf[0] = operands[0];
2635 if (optype1 == REGOP)
2636 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2637 else if (optype1 == OFFSOP)
2638 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2639 else if (optype1 == CNSTOP)
2640 split_double (operands[1], &operands[1], &latehalf[1]);
2641 else
2642 latehalf[1] = operands[1];
2644 /* If the first move would clobber the source of the second one,
2645 do them in the other order.
2647 This can happen in two cases:
2649 mem -> register where the first half of the destination register
2650 is the same register used in the memory's address. Reload
2651 can create such insns.
2653 mem in this case will be either register indirect or register
2654 indirect plus a valid offset.
2656 register -> register move where REGNO(dst) == REGNO(src) + 1.
2657 Someone (Tim/Tege?) claimed this can happen for parameter loads.
2659 Handle mem -> register case first. */
2660 if (optype0 == REGOP
2661 && (optype1 == MEMOP || optype1 == OFFSOP)
2662 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2663 operands[1], 0))
2665 /* Do the late half first. */
2666 if (addreg1)
2667 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2668 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2670 /* Then clobber. */
2671 if (addreg1)
2672 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2673 return pa_singlemove_string (operands);
2676 /* Now handle register -> register case. */
2677 if (optype0 == REGOP && optype1 == REGOP
2678 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2680 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2681 return pa_singlemove_string (operands);
2684 /* Normal case: do the two words, low-numbered first. */
2686 output_asm_insn (pa_singlemove_string (operands), operands);
2688 /* Make any unoffsettable addresses point at high-numbered word. */
2689 if (addreg0)
2690 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2691 if (addreg1)
2692 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2694 /* Do that word. */
2695 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2697 /* Undo the adds we just did. */
2698 if (addreg0)
2699 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2700 if (addreg1)
2701 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2703 return "";
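/* Editorial note on the ordering logic above (pseudo-assembly): for a
   load such as r4:r5 = mem[r4], doing the low word first would clobber
   the base register, so the late half is moved while the base is still
   intact:

	ldw 4(%r4),%r5	; late half first
	ldw 0(%r4),%r4	; then the half that overwrites the base  */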
2706 const char *
2707 pa_output_fp_move_double (rtx *operands)
2709 if (FP_REG_P (operands[0]))
2711 if (FP_REG_P (operands[1])
2712 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2713 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2714 else
2715 output_asm_insn ("fldd%F1 %1,%0", operands);
2717 else if (FP_REG_P (operands[1]))
2719 output_asm_insn ("fstd%F0 %1,%0", operands);
2721 else
2723 rtx xoperands[2];
2725 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2727 /* This is a pain. You have to be prepared to deal with an
2728 arbitrary address here including pre/post increment/decrement,
2730 so avoid this in the MD. */
2731 gcc_assert (GET_CODE (operands[0]) == REG);
2733 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2734 xoperands[0] = operands[0];
2735 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2737 return "";
2740 /* Return a REG that occurs in ADDR with coefficient 1.
2741 ADDR can be effectively incremented by incrementing REG. */
2743 static rtx
2744 find_addr_reg (rtx addr)
2746 while (GET_CODE (addr) == PLUS)
2748 if (GET_CODE (XEXP (addr, 0)) == REG)
2749 addr = XEXP (addr, 0);
2750 else if (GET_CODE (XEXP (addr, 1)) == REG)
2751 addr = XEXP (addr, 1);
2752 else if (CONSTANT_P (XEXP (addr, 0)))
2753 addr = XEXP (addr, 1);
2754 else if (CONSTANT_P (XEXP (addr, 1)))
2755 addr = XEXP (addr, 0);
2756 else
2757 gcc_unreachable ();
2759 gcc_assert (GET_CODE (addr) == REG);
2760 return addr;
2763 /* Emit code to perform a block move.
2765 OPERANDS[0] is the destination pointer as a REG, clobbered.
2766 OPERANDS[1] is the source pointer as a REG, clobbered.
2767 OPERANDS[2] is a register for temporary storage.
2768 OPERANDS[3] is a register for temporary storage.
2769 OPERANDS[4] is the size as a CONST_INT
2770 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2771 OPERANDS[6] is another temporary register. */
2773 const char *
2774 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2776 int align = INTVAL (operands[5]);
2777 unsigned long n_bytes = INTVAL (operands[4]);
2779 /* We can't move more than a word at a time because the PA
2780 has no integer move insns longer than a word. (Could use fp mem ops?) */
2781 if (align > (TARGET_64BIT ? 8 : 4))
2782 align = (TARGET_64BIT ? 8 : 4);
2784 /* Note that we know each loop below will execute at least twice
2785 (else we would have open-coded the copy). */
2786 switch (align)
2788 case 8:
2789 /* Pre-adjust the loop counter. */
2790 operands[4] = GEN_INT (n_bytes - 16);
2791 output_asm_insn ("ldi %4,%2", operands);
2793 /* Copying loop. */
2794 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2795 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2796 output_asm_insn ("std,ma %3,8(%0)", operands);
2797 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2798 output_asm_insn ("std,ma %6,8(%0)", operands);
2800 /* Handle the residual. There could be up to 15 bytes of
2801 residual to copy! */
2802 if (n_bytes % 16 != 0)
2804 operands[4] = GEN_INT (n_bytes % 8);
2805 if (n_bytes % 16 >= 8)
2806 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2807 if (n_bytes % 8 != 0)
2808 output_asm_insn ("ldd 0(%1),%6", operands);
2809 if (n_bytes % 16 >= 8)
2810 output_asm_insn ("std,ma %3,8(%0)", operands);
2811 if (n_bytes % 8 != 0)
2812 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2814 return "";
2816 case 4:
2817 /* Pre-adjust the loop counter. */
2818 operands[4] = GEN_INT (n_bytes - 8);
2819 output_asm_insn ("ldi %4,%2", operands);
2821 /* Copying loop. */
2822 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2823 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2824 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2825 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2826 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2828 /* Handle the residual. There could be up to 7 bytes of
2829 residual to copy! */
2830 if (n_bytes % 8 != 0)
2832 operands[4] = GEN_INT (n_bytes % 4);
2833 if (n_bytes % 8 >= 4)
2834 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2835 if (n_bytes % 4 != 0)
2836 output_asm_insn ("ldw 0(%1),%6", operands);
2837 if (n_bytes % 8 >= 4)
2838 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2839 if (n_bytes % 4 != 0)
2840 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2842 return "";
2844 case 2:
2845 /* Pre-adjust the loop counter. */
2846 operands[4] = GEN_INT (n_bytes - 4);
2847 output_asm_insn ("ldi %4,%2", operands);
2849 /* Copying loop. */
2850 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2851 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2852 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2853 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2854 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2856 /* Handle the residual. */
2857 if (n_bytes % 4 != 0)
2859 if (n_bytes % 4 >= 2)
2860 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2861 if (n_bytes % 2 != 0)
2862 output_asm_insn ("ldb 0(%1),%6", operands);
2863 if (n_bytes % 4 >= 2)
2864 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2865 if (n_bytes % 2 != 0)
2866 output_asm_insn ("stb %6,0(%0)", operands);
2868 return "";
2870 case 1:
2871 /* Pre-adjust the loop counter. */
2872 operands[4] = GEN_INT (n_bytes - 2);
2873 output_asm_insn ("ldi %4,%2", operands);
2875 /* Copying loop. */
2876 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2877 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2878 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2879 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2880 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2882 /* Handle the residual. */
2883 if (n_bytes % 2 != 0)
2885 output_asm_insn ("ldb 0(%1),%3", operands);
2886 output_asm_insn ("stb %3,0(%0)", operands);
2888 return "";
2890 default:
2891 gcc_unreachable ();
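/* Editorial worked example for the align == 4 case above with
   n_bytes = 23: the counter starts at 23 - 8 = 15, so the loop body
   runs twice and copies 16 bytes.  Then 23 % 8 = 7 >= 4 adds one more
   word copy, and 23 % 4 = 3 makes the final ldw/stby pair store the
   last 3 bytes.  */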
2895 /* Count the number of insns necessary to handle this block move.
2897 Basic structure is the same as pa_output_block_move, except that we
2898 count insns rather than emit them. */
2900 static int
2901 compute_movmem_length (rtx insn)
2903 rtx pat = PATTERN (insn);
2904 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2905 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2906 unsigned int n_insns = 0;
2908 /* We can't move more than a word at a time because the PA
2909 has no integer move insns longer than a word. (Could use fp mem ops?) */
2910 if (align > (TARGET_64BIT ? 8 : 4))
2911 align = (TARGET_64BIT ? 8 : 4);
2913 /* The basic copying loop. */
2914 n_insns = 6;
2916 /* Residuals. */
2917 if (n_bytes % (2 * align) != 0)
2919 if ((n_bytes % (2 * align)) >= align)
2920 n_insns += 2;
2922 if ((n_bytes % align) != 0)
2923 n_insns += 2;
2926 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2927 return n_insns * 4;
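/* Editorial cross-check with the example above (align = 4,
   n_bytes = 23): 6 loop insns, +2 because 23 % 8 >= 4, +2 because
   23 % 4 != 0, i.e. 10 insns for a returned length of 40 bytes.  */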
2930 /* Emit code to perform a block clear.
2932 OPERANDS[0] is the destination pointer as a REG, clobbered.
2933 OPERANDS[1] is a register for temporary storage.
2934 OPERANDS[2] is the size as a CONST_INT
2935 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2937 const char *
2938 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2940 int align = INTVAL (operands[3]);
2941 unsigned long n_bytes = INTVAL (operands[2]);
2943 /* We can't clear more than a word at a time because the PA
2944 has no integer move insns longer than a word. */
2945 if (align > (TARGET_64BIT ? 8 : 4))
2946 align = (TARGET_64BIT ? 8 : 4);
2948 /* Note that we know each loop below will execute at least twice
2949 (else we would have open-coded the copy). */
2950 switch (align)
2952 case 8:
2953 /* Pre-adjust the loop counter. */
2954 operands[2] = GEN_INT (n_bytes - 16);
2955 output_asm_insn ("ldi %2,%1", operands);
2957 /* Loop. */
2958 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2959 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2960 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2962 /* Handle the residual. There could be up to 15 bytes of
2963 residual to clear! */
2964 if (n_bytes % 16 != 0)
2966 operands[2] = GEN_INT (n_bytes % 8);
2967 if (n_bytes % 16 >= 8)
2968 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2969 if (n_bytes % 8 != 0)
2970 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2972 return "";
2974 case 4:
2975 /* Pre-adjust the loop counter. */
2976 operands[2] = GEN_INT (n_bytes - 8);
2977 output_asm_insn ("ldi %2,%1", operands);
2979 /* Loop. */
2980 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2981 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2982 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2984 /* Handle the residual. There could be up to 7 bytes of
2985 residual to clear! */
2986 if (n_bytes % 8 != 0)
2988 operands[2] = GEN_INT (n_bytes % 4);
2989 if (n_bytes % 8 >= 4)
2990 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2991 if (n_bytes % 4 != 0)
2992 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2994 return "";
2996 case 2:
2997 /* Pre-adjust the loop counter. */
2998 operands[2] = GEN_INT (n_bytes - 4);
2999 output_asm_insn ("ldi %2,%1", operands);
3001 /* Loop. */
3002 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3003 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3004 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3006 /* Handle the residual. */
3007 if (n_bytes % 4 != 0)
3009 if (n_bytes % 4 >= 2)
3010 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3011 if (n_bytes % 2 != 0)
3012 output_asm_insn ("stb %%r0,0(%0)", operands);
3014 return "";
3016 case 1:
3017 /* Pre-adjust the loop counter. */
3018 operands[2] = GEN_INT (n_bytes - 2);
3019 output_asm_insn ("ldi %2,%1", operands);
3021 /* Loop. */
3022 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3023 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3024 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3026 /* Handle the residual. */
3027 if (n_bytes % 2 != 0)
3028 output_asm_insn ("stb %%r0,0(%0)", operands);
3030 return "";
3032 default:
3033 gcc_unreachable ();
3037 /* Count the number of insns necessary to handle this block clear.
3039 Basic structure is the same as pa_output_block_clear, except that we
3040 count insns rather than emit them. */
3042 static int
3043 compute_clrmem_length (rtx insn)
3045 rtx pat = PATTERN (insn);
3046 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3047 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3048 unsigned int n_insns = 0;
3050 /* We can't clear more than a word at a time because the PA
3051 has no longer integer move insns. */
3052 if (align > (TARGET_64BIT ? 8 : 4))
3053 align = (TARGET_64BIT ? 8 : 4);
3055 /* The basic loop. */
3056 n_insns = 4;
3058 /* Residuals. */
3059 if (n_bytes % (2 * align) != 0)
3061 if ((n_bytes % (2 * align)) >= align)
3062 n_insns++;
3064 if ((n_bytes % align) != 0)
3065 n_insns++;
3068 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3069 return n_insns * 4;
3073 const char *
3074 pa_output_and (rtx *operands)
3076 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3078 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3079 int ls0, ls1, ms0, p, len;
3081 for (ls0 = 0; ls0 < 32; ls0++)
3082 if ((mask & (1 << ls0)) == 0)
3083 break;
3085 for (ls1 = ls0; ls1 < 32; ls1++)
3086 if ((mask & (1 << ls1)) != 0)
3087 break;
3089 for (ms0 = ls1; ms0 < 32; ms0++)
3090 if ((mask & (1 << ms0)) == 0)
3091 break;
3093 gcc_assert (ms0 == 32);
3095 if (ls1 == 32)
3097 len = ls0;
3099 gcc_assert (len);
3101 operands[2] = GEN_INT (len);
3102 return "{extru|extrw,u} %1,31,%2,%0";
3104 else
3106 /* We could use this `depi' for the case above as well, but `depi'
3107 requires one more register file access than an `extru'. */
3109 p = 31 - ls0;
3110 len = ls1 - ls0;
3112 operands[2] = GEN_INT (p);
3113 operands[3] = GEN_INT (len);
3114 return "{depi|depwi} 0,%2,%3,%0";
3117 else
3118 return "and %1,%2,%0";
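/* Editorial sketch: the mask shapes pa_output_and accepts, as a
   stand-alone classifier.  Assumes a nonzero MASK, as guaranteed by
   the caller's CONST_INT test above.  */
#if 0
static int
and_mask_kind (unsigned int mask)
{
  int ls0, ls1, ms0;

  for (ls0 = 0; ls0 < 32; ls0++)	/* lowest clear bit */
    if ((mask & (1u << ls0)) == 0)
      break;
  for (ls1 = ls0; ls1 < 32; ls1++)	/* next set bit above it */
    if ((mask & (1u << ls1)) != 0)
      break;
  for (ms0 = ls1; ms0 < 32; ms0++)	/* next clear bit above that */
    if ((mask & (1u << ms0)) == 0)
      break;

  if (ms0 != 32)
    return 0;			/* more than one zero field: plain "and" */
  return ls1 == 32 ? 1 : 2;	/* 1: extrw,u case; 2: depwi 0 case */
}
#endif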
3121 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3122 storing the result in operands[0]. */
3123 const char *
3124 pa_output_64bit_and (rtx *operands)
3126 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3128 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3129 int ls0, ls1, ms0, p, len;
3131 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3132 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3133 break;
3135 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3136 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3137 break;
3139 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3140 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3141 break;
3143 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3145 if (ls1 == HOST_BITS_PER_WIDE_INT)
3147 len = ls0;
3149 gcc_assert (len);
3151 operands[2] = GEN_INT (len);
3152 return "extrd,u %1,63,%2,%0";
3154 else
3156 /* We could use this `depdi' for the case above as well, but `depdi'
3157 requires one more register file access than an `extrd,u'. */
3159 p = 63 - ls0;
3160 len = ls1 - ls0;
3162 operands[2] = GEN_INT (p);
3163 operands[3] = GEN_INT (len);
3164 return "depdi 0,%2,%3,%0";
3167 else
3168 return "and %1,%2,%0";
3171 const char *
3172 pa_output_ior (rtx *operands)
3174 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3175 int bs0, bs1, p, len;
3177 if (INTVAL (operands[2]) == 0)
3178 return "copy %1,%0";
3180 for (bs0 = 0; bs0 < 32; bs0++)
3181 if ((mask & (1 << bs0)) != 0)
3182 break;
3184 for (bs1 = bs0; bs1 < 32; bs1++)
3185 if ((mask & (1 << bs1)) == 0)
3186 break;
3188 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3190 p = 31 - bs0;
3191 len = bs1 - bs0;
3193 operands[2] = GEN_INT (p);
3194 operands[3] = GEN_INT (len);
3195 return "{depi|depwi} -1,%2,%3,%0";
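/* Editorial worked example for pa_output_ior: operands[2] = 0x0000ff00
   gives bs0 = 8 and bs1 = 16, hence p = 23 and len = 8, producing
   "depwi -1,23,8,%0", which deposits eight one-bits over bits 8..15.  */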
3198 /* Return a string to perform a bitwise-or of operands[1] with operands[2],
3199 storing the result in operands[0]. */
3200 const char *
3201 pa_output_64bit_ior (rtx *operands)
3203 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3204 int bs0, bs1, p, len;
3206 if (INTVAL (operands[2]) == 0)
3207 return "copy %1,%0";
3209 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3210 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3211 break;
3213 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3214 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3215 break;
3217 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3218 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3220 p = 63 - bs0;
3221 len = bs1 - bs0;
3223 operands[2] = GEN_INT (p);
3224 operands[3] = GEN_INT (len);
3225 return "depdi -1,%2,%3,%0";
3228 /* Target hook for assembling integer objects. This code handles
3229 aligned SI and DI integers specially since function references
3230 must be preceded by P%. */
3232 static bool
3233 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3235 if (size == UNITS_PER_WORD
3236 && aligned_p
3237 && function_label_operand (x, VOIDmode))
3239 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3240 output_addr_const (asm_out_file, x);
3241 fputc ('\n', asm_out_file);
3242 return true;
3244 return default_assemble_integer (x, size, aligned_p);
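/* Editorial example of the special case above: on a 32-bit target an
   aligned word-sized reference to a function foo (hypothetical name)
   is emitted as

	.word	P%foo

   where the P% prefix requests a plabel rather than the raw code
   address.  */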
3247 /* Output an ASCII string. */
3248 void
3249 pa_output_ascii (FILE *file, const char *p, int size)
3251 int i;
3252 int chars_output;
3253 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3255 /* The HP assembler can only take strings of 256 characters at one
3256 time. This is a limitation on input line length, *not* the
3257 length of the string. Sigh. Even worse, it seems that the
3258 restriction is in number of input characters (see \xnn &
3259 \whatever). So we have to do this very carefully. */
3261 fputs ("\t.STRING \"", file);
3263 chars_output = 0;
3264 for (i = 0; i < size; i += 4)
3266 int co = 0;
3267 int io = 0;
3268 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3270 register unsigned int c = (unsigned char) p[i + io];
3272 if (c == '\"' || c == '\\')
3273 partial_output[co++] = '\\';
3274 if (c >= ' ' && c < 0177)
3275 partial_output[co++] = c;
3276 else
3278 unsigned int hexd;
3279 partial_output[co++] = '\\';
3280 partial_output[co++] = 'x';
3281 hexd = c / 16 - 0 + '0';
3282 if (hexd > '9')
3283 hexd -= '9' - 'a' + 1;
3284 partial_output[co++] = hexd;
3285 hexd = c % 16 - 0 + '0';
3286 if (hexd > '9')
3287 hexd -= '9' - 'a' + 1;
3288 partial_output[co++] = hexd;
3291 if (chars_output + co > 243)
3293 fputs ("\"\n\t.STRING \"", file);
3294 chars_output = 0;
3296 fwrite (partial_output, 1, (size_t) co, file);
3297 chars_output += co;
3298 co = 0;
3300 fputs ("\"\n", file);
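/* Editorial worked example of the escape arithmetic above: for the
   byte c = 0x1b, c / 16 = 1 maps directly to '1', while c % 16 = 11
   first maps to ';' (11 + '0'); that is > '9', so subtracting
   '9' - 'a' + 1 (a negative quantity) lands on 'b', emitting "\x1b".  */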
3303 /* Try to rewrite floating point comparisons & branches to avoid
3304 useless add,tr insns.
3306 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3307 to see if FPCC is dead. It is nonzero for the
3308 first attempt to remove useless add,tr insns and zero
3309 for the second pass as reorg sometimes leaves bogus REG_DEAD
3310 notes lying around.
3312 When CHECK_NOTES is zero we can only eliminate add,tr insns
3313 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3314 instructions. */
3315 static void
3316 remove_useless_addtr_insns (int check_notes)
3318 rtx insn;
3319 static int pass = 0;
3321 /* This is fairly cheap, so always run it when optimizing. */
3322 if (optimize > 0)
3324 int fcmp_count = 0;
3325 int fbranch_count = 0;
3327 /* Walk all the insns in this function looking for fcmp & fbranch
3328 instructions. Keep track of how many of each we find. */
3329 for (insn = get_insns (); insn; insn = next_insn (insn))
3331 rtx tmp;
3333 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3334 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3335 continue;
3337 tmp = PATTERN (insn);
3339 /* It must be a set. */
3340 if (GET_CODE (tmp) != SET)
3341 continue;
3343 /* If the destination is CCFP, then we've found an fcmp insn. */
3344 tmp = SET_DEST (tmp);
3345 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3347 fcmp_count++;
3348 continue;
3351 tmp = PATTERN (insn);
3352 /* If this is an fbranch instruction, bump the fbranch counter. */
3353 if (GET_CODE (tmp) == SET
3354 && SET_DEST (tmp) == pc_rtx
3355 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3356 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3357 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3358 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3360 fbranch_count++;
3361 continue;
3366 /* Find all floating point compare + branch insns. If possible,
3367 reverse the comparison & the branch to avoid add,tr insns. */
3368 for (insn = get_insns (); insn; insn = next_insn (insn))
3370 rtx tmp, next;
3372 /* Ignore anything that isn't an INSN. */
3373 if (! NONJUMP_INSN_P (insn))
3374 continue;
3376 tmp = PATTERN (insn);
3378 /* It must be a set. */
3379 if (GET_CODE (tmp) != SET)
3380 continue;
3382 /* The destination must be CCFP, which is register zero. */
3383 tmp = SET_DEST (tmp);
3384 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3385 continue;
3387 /* INSN should be a set of CCFP.
3389 See if the result of this insn is used in a reversed FP
3390 conditional branch. If so, reverse our condition and
3391 the branch. Doing so avoids useless add,tr insns. */
3392 next = next_insn (insn);
3393 while (next)
3395 /* Jumps, calls and labels stop our search. */
3396 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3397 break;
3399 /* As does another fcmp insn. */
3400 if (NONJUMP_INSN_P (next)
3401 && GET_CODE (PATTERN (next)) == SET
3402 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3403 && REGNO (SET_DEST (PATTERN (next))) == 0)
3404 break;
3406 next = next_insn (next);
3409 /* Is NEXT_INSN a branch? */
3410 if (next && JUMP_P (next))
3412 rtx pattern = PATTERN (next);
3414 /* If it is a reversed fp conditional branch (e.g. it uses add,tr)
3415 and CCFP dies, then reverse our conditional and the branch
3416 to avoid the add,tr. */
3417 if (GET_CODE (pattern) == SET
3418 && SET_DEST (pattern) == pc_rtx
3419 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3420 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3421 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3422 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3423 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3424 && (fcmp_count == fbranch_count
3425 || (check_notes
3426 && find_regno_note (next, REG_DEAD, 0))))
3428 /* Reverse the branch. */
3429 tmp = XEXP (SET_SRC (pattern), 1);
3430 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3431 XEXP (SET_SRC (pattern), 2) = tmp;
3432 INSN_CODE (next) = -1;
3434 /* Reverse our condition. */
3435 tmp = PATTERN (insn);
3436 PUT_CODE (XEXP (tmp, 1),
3437 (reverse_condition_maybe_unordered
3438 (GET_CODE (XEXP (tmp, 1)))));
3444 pass = !pass;
3448 /* You may have trouble believing this, but this is the 32 bit HP-PA
3449 stack layout. Wow.
3451 Offset Contents
3453 Variable arguments (optional; any number may be allocated)
3455 SP-(4*(N+9)) arg word N
3457 SP-56 arg word 5
3458 SP-52 arg word 4
3460 Fixed arguments (must be allocated; may remain unused)
3462 SP-48 arg word 3
3463 SP-44 arg word 2
3464 SP-40 arg word 1
3465 SP-36 arg word 0
3467 Frame Marker
3469 SP-32 External Data Pointer (DP)
3470 SP-28 External sr4
3471 SP-24 External/stub RP (RP')
3472 SP-20 Current RP
3473 SP-16 Static Link
3474 SP-12 Clean up
3475 SP-8 Calling Stub RP (RP'')
3476 SP-4 Previous SP
3478 Top of Frame
3480 SP-0 Stack Pointer (points to next available address)
3484 /* This function saves registers as follows. Registers marked with ' are
3485 this function's registers (as opposed to the previous function's).
3486 If a frame_pointer isn't needed, r4 is saved as a general register;
3487 the space for the frame pointer is still allocated, though, to keep
3488 things simple.
3491 Top of Frame
3493 SP (FP') Previous FP
3494 SP + 4 Alignment filler (sigh)
3495 SP + 8 Space for locals reserved here.
3499 SP + n All call saved registers used.
3503 SP + o All call saved fp registers used.
3507 SP + p (SP') points to next available address.
3511 /* Global variables set by output_function_prologue(). */
3512 /* Size of frame. Need to know this to emit return insns from
3513 leaf procedures. */
3514 static HOST_WIDE_INT actual_fsize, local_fsize;
3515 static int save_fregs;
3517 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3518 Handle case where DISP > 8k by using the add_high_const patterns.
3520 Note that in the DISP > 8k case, we leave the high part of the
3521 address in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3523 static void
3524 store_reg (int reg, HOST_WIDE_INT disp, int base)
3526 rtx insn, dest, src, basereg;
3528 src = gen_rtx_REG (word_mode, reg);
3529 basereg = gen_rtx_REG (Pmode, base);
3530 if (VAL_14_BITS_P (disp))
3532 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3533 insn = emit_move_insn (dest, src);
3535 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3537 rtx delta = GEN_INT (disp);
3538 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3540 emit_move_insn (tmpreg, delta);
3541 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3542 if (DO_FRAME_NOTES)
3544 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3545 gen_rtx_SET (VOIDmode, tmpreg,
3546 gen_rtx_PLUS (Pmode, basereg, delta)));
3547 RTX_FRAME_RELATED_P (insn) = 1;
3549 dest = gen_rtx_MEM (word_mode, tmpreg);
3550 insn = emit_move_insn (dest, src);
3552 else
3554 rtx delta = GEN_INT (disp);
3555 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3556 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3558 emit_move_insn (tmpreg, high);
3559 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3560 insn = emit_move_insn (dest, src);
3561 if (DO_FRAME_NOTES)
3562 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3563 gen_rtx_SET (VOIDmode,
3564 gen_rtx_MEM (word_mode,
3565 gen_rtx_PLUS (word_mode,
3566 basereg,
3567 delta)),
3568 src));
3571 if (DO_FRAME_NOTES)
3572 RTX_FRAME_RELATED_P (insn) = 1;
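/* Editorial example of the DISP > 8k path above (32-bit target,
   pseudo-assembly, offset chosen for illustration): storing %r3 at
   20000(%r30) cannot use a 14-bit displacement, so the sequence is
   roughly

	addil	L'20000,%r30	; high part, result left in %r1
	stw	%r3,R'20000(%r1)	; low part via the LO_SUM

   which is why %r1 is documented to hold the high part afterwards.  */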
3575 /* Emit RTL to store REG at the memory location specified by BASE and then
3576 add MOD to BASE. MOD must be <= 8k. */
3578 static void
3579 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3581 rtx insn, basereg, srcreg, delta;
3583 gcc_assert (VAL_14_BITS_P (mod));
3585 basereg = gen_rtx_REG (Pmode, base);
3586 srcreg = gen_rtx_REG (word_mode, reg);
3587 delta = GEN_INT (mod);
3589 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3590 if (DO_FRAME_NOTES)
3592 RTX_FRAME_RELATED_P (insn) = 1;
3594 /* RTX_FRAME_RELATED_P must be set on each frame related set
3595 in a parallel with more than one element. */
3596 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3597 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3601 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3602 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3603 whether to add a frame note or not.
3605 In the DISP > 8k case, we leave the high part of the address in %r1.
3606 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3608 static void
3609 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3611 rtx insn;
3613 if (VAL_14_BITS_P (disp))
3615 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3616 plus_constant (Pmode,
3617 gen_rtx_REG (Pmode, base), disp));
3619 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3621 rtx basereg = gen_rtx_REG (Pmode, base);
3622 rtx delta = GEN_INT (disp);
3623 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3625 emit_move_insn (tmpreg, delta);
3626 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3627 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3628 if (DO_FRAME_NOTES)
3629 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3630 gen_rtx_SET (VOIDmode, tmpreg,
3631 gen_rtx_PLUS (Pmode, basereg, delta)));
3633 else
3635 rtx basereg = gen_rtx_REG (Pmode, base);
3636 rtx delta = GEN_INT (disp);
3637 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3639 emit_move_insn (tmpreg,
3640 gen_rtx_PLUS (Pmode, basereg,
3641 gen_rtx_HIGH (Pmode, delta)));
3642 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3643 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3646 if (DO_FRAME_NOTES && note)
3647 RTX_FRAME_RELATED_P (insn) = 1;
3650 HOST_WIDE_INT
3651 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3653 int freg_saved = 0;
3654 int i, j;
3656 /* The code in pa_expand_prologue and pa_expand_epilogue must
3657 be consistent with the rounding and size calculation done here.
3658 Change them at the same time. */
3660 /* We do our own stack alignment. First, round the size of the
3661 stack locals up to a word boundary. */
3662 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3664 /* Space for previous frame pointer + filler. If any frame is
3665 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3666 waste some space here for the sake of HP compatibility. The
3667 first slot is only used when the frame pointer is needed. */
3668 if (size || frame_pointer_needed)
3669 size += STARTING_FRAME_OFFSET;
3671 /* If the current function calls __builtin_eh_return, then we need
3672 to allocate stack space for registers that will hold data for
3673 the exception handler. */
3674 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3676 unsigned int i;
3678 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3679 continue;
3680 size += i * UNITS_PER_WORD;
3683 /* Account for space used by the callee general register saves. */
3684 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3685 if (df_regs_ever_live_p (i))
3686 size += UNITS_PER_WORD;
3688 /* Account for space used by the callee floating point register saves. */
3689 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3690 if (df_regs_ever_live_p (i)
3691 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3693 freg_saved = 1;
3695 /* We always save both halves of the FP register, so always
3696 increment the frame size by 8 bytes. */
3697 size += 8;
3700 /* If any of the floating registers are saved, account for the
3701 alignment needed for the floating point register save block. */
3702 if (freg_saved)
3704 size = (size + 7) & ~7;
3705 if (fregs_live)
3706 *fregs_live = 1;
3709 /* The various ABIs include space for the outgoing parameters in the
3710 size of the current function's stack frame. We don't need to align
3711 for the outgoing arguments as their alignment is set by the final
3712 rounding for the frame as a whole. */
3713 size += crtl->outgoing_args_size;
3715 /* Allocate space for the fixed frame marker. This space must be
3716 allocated for any function that makes calls or allocates
3717 stack space. */
3718 if (!crtl->is_leaf || size)
3719 size += TARGET_64BIT ? 48 : 32;
3721 /* Finally, round to the preferred stack boundary. */
3722 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3723 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
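/* Editorial note on the final rounding above: with B =
   PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT, the expression
   (size + B - 1) & ~(B - 1) is the usual round-up-to-a-multiple idiom
   for a power-of-two B; e.g. B = 64 rounds a size of 130 up to 192.  */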
3726 /* Generate the assembly code for function entry. FILE is a stdio
3727 stream to output the code to. SIZE is an int: how many units of
3728 temporary storage to allocate.
3730 Refer to the array `regs_ever_live' to determine which registers to
3731 save; `regs_ever_live[I]' is nonzero if register number I is ever
3732 used in the function. This function is responsible for knowing
3733 which registers should not be saved even if used. */
3735 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3736 of memory. If any fpu reg is used in the function, we allocate
3737 such a block here, at the bottom of the frame, just in case it's needed.
3739 If this function is a leaf procedure, then we may choose not
3740 to do a "save" insn. The decision about whether or not
3741 to do this is made in regclass.c. */
3743 static void
3744 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3746 /* The function's label and associated .PROC must never be
3747 separated and must be output *after* any profiling declarations
3748 to avoid changing spaces/subspaces within a procedure. */
3749 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3750 fputs ("\t.PROC\n", file);
3752 /* pa_expand_prologue does the dirty work now. We just need
3753 to output the assembler directives which denote the start
3754 of a function. */
3755 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3756 if (crtl->is_leaf)
3757 fputs (",NO_CALLS", file);
3758 else
3759 fputs (",CALLS", file);
3760 if (rp_saved)
3761 fputs (",SAVE_RP", file);
3763 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3764 at the beginning of the frame and that it is used as the frame
3765 pointer for the frame. We do this because our current frame
3766 layout doesn't conform to that specified in the HP runtime
3767 documentation and we need a way to indicate to programs such as
3768 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3769 isn't used by HP compilers but is supported by the assembler.
3770 However, SAVE_SP is supposed to indicate that the previous stack
3771 pointer has been saved in the frame marker. */
3772 if (frame_pointer_needed)
3773 fputs (",SAVE_SP", file);
3775 /* Pass on information about the number of callee register saves
3776 performed in the prologue.
3778 The compiler is supposed to pass the highest register number
3779 saved, the assembler then has to adjust that number before
3780 entering it into the unwind descriptor (to account for any
3781 caller saved registers with lower register numbers than the
3782 first callee saved register). */
3783 if (gr_saved)
3784 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3786 if (fr_saved)
3787 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3789 fputs ("\n\t.ENTRY\n", file);
3791 remove_useless_addtr_insns (0);
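/* Putting the pieces above together, a hypothetical non-leaf function
   with a 128-byte frame that saves %r2 and the callee registers
   %r3..%r5 (so gr_saved == 3) would get directives roughly like:

       func:
           .PROC
           .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=5
           .ENTRY

   The exact output naturally depends on the target and options. */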
3794 void
3795 pa_expand_prologue (void)
3797 int merge_sp_adjust_with_store = 0;
3798 HOST_WIDE_INT size = get_frame_size ();
3799 HOST_WIDE_INT offset;
3800 int i;
3801 rtx insn, tmpreg;
3803 gr_saved = 0;
3804 fr_saved = 0;
3805 save_fregs = 0;
3807 /* Compute total size for frame pointer, filler, locals and rounding to
3808 the next word boundary. Similar code appears in pa_compute_frame_size
3809 and must be changed in tandem with this code. */
3810 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3811 if (local_fsize || frame_pointer_needed)
3812 local_fsize += STARTING_FRAME_OFFSET;
3814 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3815 if (flag_stack_usage_info)
3816 current_function_static_stack_size = actual_fsize;
3818 /* Compute a few things we will use often. */
3819 tmpreg = gen_rtx_REG (word_mode, 1);
3821 /* Save RP first. The calling conventions manual states RP will
3822 always be stored into the caller's frame at sp - 20 or sp - 16
3823 depending on which ABI is in use. */
3824 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3826 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3827 rp_saved = true;
3829 else
3830 rp_saved = false;
3832 /* Allocate the local frame and set up the frame pointer if needed. */
3833 if (actual_fsize != 0)
3835 if (frame_pointer_needed)
3837 /* Copy the old frame pointer temporarily into %r1. Set up the
3838 new stack pointer, then store away the saved old frame pointer
3839 into the stack at sp and at the same time update the stack
3840 pointer by actual_fsize bytes. Two versions: the first
3841 handles small (<8k) frames, the second large (>=8k)
3842 frames. */
3843 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3844 if (DO_FRAME_NOTES)
3845 RTX_FRAME_RELATED_P (insn) = 1;
3847 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3848 if (DO_FRAME_NOTES)
3849 RTX_FRAME_RELATED_P (insn) = 1;
3851 if (VAL_14_BITS_P (actual_fsize))
3852 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3853 else
3855 /* It is incorrect to store the saved frame pointer at *sp,
3856 then increment sp (writes beyond the current stack boundary).
3858 So instead use stwm to store at *sp and post-increment the
3859 stack pointer as an atomic operation. Then increment sp to
3860 finish allocating the new frame. */
3861 HOST_WIDE_INT adjust1 = 8192 - 64;
3862 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3864 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3865 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3866 adjust2, 1);
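/* Worked example (illustrative): for actual_fsize == 10000 we get
   adjust1 == 8192 - 64 == 8128 and adjust2 == 1872. The stwm stores
   the saved frame pointer at *sp while atomically bumping sp by 8128,
   and the follow-up add supplies the remaining 1872 bytes; both
   constants fit the 14-bit displacement field. */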
3869 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3870 we need to store the previous stack pointer (frame pointer)
3871 into the frame marker on targets that use the HP unwind
3872 library. This allows the HP unwind library to be used to
3873 unwind GCC frames. However, we are not fully compatible
3874 with the HP library because our frame layout differs from
3875 that specified in the HP runtime specification.
3877 We don't want a frame note on this instruction as the frame
3878 marker moves during dynamic stack allocation.
3880 This instruction also serves as a blockage to prevent
3881 register spills from being scheduled before the stack
3882 pointer is raised. This is necessary as we store
3883 registers using the frame pointer as a base register,
3884 and the frame pointer is set before sp is raised. */
3885 if (TARGET_HPUX_UNWIND_LIBRARY)
3887 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3888 GEN_INT (TARGET_64BIT ? -8 : -4));
3890 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3891 hard_frame_pointer_rtx);
3893 else
3894 emit_insn (gen_blockage ());
3896 /* No frame pointer needed. */
3897 else
3899 /* In some cases we can perform the first callee register save
3900 and allocating the stack frame at the same time. If so, just
3901 make a note of it and defer allocating the frame until saving
3902 the callee registers. */
3903 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3904 merge_sp_adjust_with_store = 1;
3905 /* Cannot optimize. Adjust the stack frame by actual_fsize
3906 bytes. */
3907 else
3908 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3909 actual_fsize, 1);
3913 /* Normal register save.
3915 Do not save the frame pointer in the frame_pointer_needed case. It
3916 was done earlier. */
3917 if (frame_pointer_needed)
3919 offset = local_fsize;
3921 /* Saving the EH return data registers in the frame is the simplest
3922 way to get the frame unwind information emitted. We put them
3923 just before the general registers. */
3924 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3926 unsigned int i, regno;
3928 for (i = 0; ; ++i)
3930 regno = EH_RETURN_DATA_REGNO (i);
3931 if (regno == INVALID_REGNUM)
3932 break;
3934 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3935 offset += UNITS_PER_WORD;
3939 for (i = 18; i >= 4; i--)
3940 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3942 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3943 offset += UNITS_PER_WORD;
3944 gr_saved++;
3946 /* Account for %r3 which is saved in a special place. */
3947 gr_saved++;
3949 /* No frame pointer needed. */
3950 else
3952 offset = local_fsize - actual_fsize;
3954 /* Saving the EH return data registers in the frame is the simplest
3955 way to get the frame unwind information emitted. */
3956 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3958 unsigned int i, regno;
3960 for (i = 0; ; ++i)
3962 regno = EH_RETURN_DATA_REGNO (i);
3963 if (regno == INVALID_REGNUM)
3964 break;
3966 /* If merge_sp_adjust_with_store is nonzero, then we can
3967 optimize the first save. */
3968 if (merge_sp_adjust_with_store)
3970 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3971 merge_sp_adjust_with_store = 0;
3973 else
3974 store_reg (regno, offset, STACK_POINTER_REGNUM);
3975 offset += UNITS_PER_WORD;
3979 for (i = 18; i >= 3; i--)
3980 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3982 /* If merge_sp_adjust_with_store is nonzero, then we can
3983 optimize the first GR save. */
3984 if (merge_sp_adjust_with_store)
3986 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3987 merge_sp_adjust_with_store = 0;
3989 else
3990 store_reg (i, offset, STACK_POINTER_REGNUM);
3991 offset += UNITS_PER_WORD;
3992 gr_saved++;
3995 /* If we wanted to merge the SP adjustment with a GR save, but we never
3996 did any GR saves, then just emit the adjustment here. */
3997 if (merge_sp_adjust_with_store)
3998 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3999 actual_fsize, 1);
4002 /* The hppa calling conventions say that %r19, the pic offset
4003 register, is saved at sp - 32 (in this function's frame)
4004 when generating PIC code. FIXME: What is the correct thing
4005 to do for functions which make no calls and allocate no
4006 frame? Do we need to allocate a frame, or can we just omit
4007 the save? For now we'll just omit the save.
4009 We don't want a note on this insn as the frame marker can
4010 move if there is a dynamic stack allocation. */
4011 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4013 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4015 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4019 /* Align pointer properly (doubleword boundary). */
4020 offset = (offset + 7) & ~7;
4022 /* Floating point register store. */
4023 if (save_fregs)
4025 rtx base;
4027 /* First get the frame or stack pointer to the start of the FP register
4028 save area. */
4029 if (frame_pointer_needed)
4031 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4032 base = hard_frame_pointer_rtx;
4034 else
4036 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4037 base = stack_pointer_rtx;
4040 /* Now actually save the FP registers. */
4041 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4043 if (df_regs_ever_live_p (i)
4044 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4046 rtx addr, insn, reg;
4047 addr = gen_rtx_MEM (DFmode,
4048 gen_rtx_POST_INC (word_mode, tmpreg));
4049 reg = gen_rtx_REG (DFmode, i);
4050 insn = emit_move_insn (addr, reg);
4051 if (DO_FRAME_NOTES)
4053 RTX_FRAME_RELATED_P (insn) = 1;
4054 if (TARGET_64BIT)
4056 rtx mem = gen_rtx_MEM (DFmode,
4057 plus_constant (Pmode, base,
4058 offset));
4059 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4060 gen_rtx_SET (VOIDmode, mem, reg));
4062 else
4064 rtx meml = gen_rtx_MEM (SFmode,
4065 plus_constant (Pmode, base,
4066 offset));
4067 rtx memr = gen_rtx_MEM (SFmode,
4068 plus_constant (Pmode, base,
4069 offset + 4));
4070 rtx regl = gen_rtx_REG (SFmode, i);
4071 rtx regr = gen_rtx_REG (SFmode, i + 1);
4072 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4073 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4074 rtvec vec;
4076 RTX_FRAME_RELATED_P (setl) = 1;
4077 RTX_FRAME_RELATED_P (setr) = 1;
4078 vec = gen_rtvec (2, setl, setr);
4079 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4080 gen_rtx_SEQUENCE (VOIDmode, vec));
4083 offset += GET_MODE_SIZE (DFmode);
4084 fr_saved++;
4090 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4091 Handle case where DISP > 8k by using the add_high_const patterns. */
4093 static void
4094 load_reg (int reg, HOST_WIDE_INT disp, int base)
4096 rtx dest = gen_rtx_REG (word_mode, reg);
4097 rtx basereg = gen_rtx_REG (Pmode, base);
4098 rtx src;
4100 if (VAL_14_BITS_P (disp))
4101 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4102 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4104 rtx delta = GEN_INT (disp);
4105 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4107 emit_move_insn (tmpreg, delta);
4108 if (TARGET_DISABLE_INDEXING)
4110 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4111 src = gen_rtx_MEM (word_mode, tmpreg);
4113 else
4114 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4116 else
4118 rtx delta = GEN_INT (disp);
4119 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4120 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4122 emit_move_insn (tmpreg, high);
4123 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4126 emit_move_insn (dest, src);
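/* Roughly, the first and last cases above correspond to the usual PA
   idioms (illustrative assembly, not emitted verbatim):

       ldw disp(basereg),dest       ; disp fits in 14 bits
       addil L'disp,basereg         ; large disp: %r1 = base + left part
       ldw R'disp(%r1),dest         ; then load via the right part

   The middle (64-bit, > 32-bit displacement) case materializes the
   displacement in %r1 and addresses through it instead. */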
4129 /* Update the total code bytes output to the text section. */
4131 static void
4132 update_total_code_bytes (unsigned int nbytes)
4134 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4135 && !IN_NAMED_SECTION_P (cfun->decl))
4137 unsigned int old_total = total_code_bytes;
4139 total_code_bytes += nbytes;
4141 /* Be prepared to handle overflows. */
4142 if (old_total > total_code_bytes)
4143 total_code_bytes = UINT_MAX;
4147 /* This function generates the assembly code for function exit.
4148 Args are as for output_function_prologue ().
4150 The function epilogue should not depend on the current stack
4151 pointer! It should use the frame pointer only. This is mandatory
4152 because of alloca; we also take advantage of it to omit stack
4153 adjustments before returning. */
4155 static void
4156 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4158 rtx insn = get_last_insn ();
4160 last_address = 0;
4162 /* pa_expand_epilogue does the dirty work now. We just need
4163 to output the assembler directives which denote the end
4164 of a function.
4166 To make debuggers happy, emit a nop if the epilogue was completely
4167 eliminated due to a volatile call as the last insn in the
4168 current function. That way the return address (in %r2) will
4169 always point to a valid instruction in the current function. */
4171 /* Get the last real insn. */
4172 if (NOTE_P (insn))
4173 insn = prev_real_insn (insn);
4175 /* If it is a sequence, then look inside. */
4176 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4177 insn = XVECEXP (PATTERN (insn), 0, 0);
4179 /* If insn is a CALL_INSN, then it must be a call to a volatile
4180 function (otherwise there would be epilogue insns). */
4181 if (insn && CALL_P (insn))
4183 fputs ("\tnop\n", file);
4184 last_address += 4;
4187 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4189 if (TARGET_SOM && TARGET_GAS)
4191 /* We are done with this subspace except possibly for some additional
4192 debug information. Forget that we are in this subspace to ensure
4193 that the next function is output in its own subspace. */
4194 in_section = NULL;
4195 cfun->machine->in_nsubspa = 2;
4198 /* Thunks do their own accounting. */
4199 if (cfun->is_thunk)
4200 return;
4202 if (INSN_ADDRESSES_SET_P ())
4204 insn = get_last_nonnote_insn ();
4205 last_address += INSN_ADDRESSES (INSN_UID (insn));
4206 if (INSN_P (insn))
4207 last_address += insn_default_length (insn);
4208 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4209 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4211 else
4212 last_address = UINT_MAX;
4214 /* Finally, update the total number of code bytes output so far. */
4215 update_total_code_bytes (last_address);
4218 void
4219 pa_expand_epilogue (void)
4221 rtx tmpreg;
4222 HOST_WIDE_INT offset;
4223 HOST_WIDE_INT ret_off = 0;
4224 int i;
4225 int merge_sp_adjust_with_load = 0;
4227 /* We will use this often. */
4228 tmpreg = gen_rtx_REG (word_mode, 1);
4230 /* Try to restore RP early to avoid load/use interlocks when
4231 RP gets used in the return (bv) instruction. This appears to still
4232 be necessary even when we schedule the prologue and epilogue. */
4233 if (rp_saved)
4235 ret_off = TARGET_64BIT ? -16 : -20;
4236 if (frame_pointer_needed)
4238 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4239 ret_off = 0;
4241 else
4243 /* No frame pointer, and stack is smaller than 8k. */
4244 if (VAL_14_BITS_P (ret_off - actual_fsize))
4246 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4247 ret_off = 0;
4252 /* General register restores. */
4253 if (frame_pointer_needed)
4255 offset = local_fsize;
4257 /* If the current function calls __builtin_eh_return, then we need
4258 to restore the saved EH data registers. */
4259 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4261 unsigned int i, regno;
4263 for (i = 0; ; ++i)
4265 regno = EH_RETURN_DATA_REGNO (i);
4266 if (regno == INVALID_REGNUM)
4267 break;
4269 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4270 offset += UNITS_PER_WORD;
4274 for (i = 18; i >= 4; i--)
4275 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4277 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4278 offset += UNITS_PER_WORD;
4281 else
4283 offset = local_fsize - actual_fsize;
4285 /* If the current function calls __builtin_eh_return, then we need
4286 to restore the saved EH data registers. */
4287 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4289 unsigned int i, regno;
4291 for (i = 0; ; ++i)
4293 regno = EH_RETURN_DATA_REGNO (i);
4294 if (regno == INVALID_REGNUM)
4295 break;
4297 /* Only for the first load.
4298 merge_sp_adjust_with_load holds the register load
4299 with which we will merge the sp adjustment. */
4300 if (merge_sp_adjust_with_load == 0
4301 && local_fsize == 0
4302 && VAL_14_BITS_P (-actual_fsize))
4303 merge_sp_adjust_with_load = regno;
4304 else
4305 load_reg (regno, offset, STACK_POINTER_REGNUM);
4306 offset += UNITS_PER_WORD;
4310 for (i = 18; i >= 3; i--)
4312 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4314 /* Only for the first load.
4315 merge_sp_adjust_with_load holds the register load
4316 with which we will merge the sp adjustment. */
4317 if (merge_sp_adjust_with_load == 0
4318 && local_fsize == 0
4319 && VAL_14_BITS_P (-actual_fsize))
4320 merge_sp_adjust_with_load = i;
4321 else
4322 load_reg (i, offset, STACK_POINTER_REGNUM);
4323 offset += UNITS_PER_WORD;
4328 /* Align pointer properly (doubleword boundary). */
4329 offset = (offset + 7) & ~7;
4331 /* FP register restores. */
4332 if (save_fregs)
4334 /* Adjust the register to index off of. */
4335 if (frame_pointer_needed)
4336 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4337 else
4338 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4340 /* Actually do the restores now. */
4341 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4342 if (df_regs_ever_live_p (i)
4343 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4345 rtx src = gen_rtx_MEM (DFmode,
4346 gen_rtx_POST_INC (word_mode, tmpreg));
4347 rtx dest = gen_rtx_REG (DFmode, i);
4348 emit_move_insn (dest, src);
4352 /* Emit a blockage insn here to keep these insns from being moved to
4353 an earlier spot in the epilogue, or into the main instruction stream.
4355 This is necessary as we must not cut the stack back before all the
4356 restores are finished. */
4357 emit_insn (gen_blockage ());
4359 /* Reset stack pointer (and possibly frame pointer). The stack
4360 pointer is initially set to fp + 64 to avoid a race condition. */
4361 if (frame_pointer_needed)
4363 rtx delta = GEN_INT (-64);
4365 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4366 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4367 stack_pointer_rtx, delta));
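/* Schematically (illustrative, 32-bit case with %r3 as frame pointer):

       ldo 64(%r3),%r30          ; sp = fp + 64
       ldw,mb -64(%r30),%r3      ; sp -= 64, then reload the old fp

   Since the PA stack grows upward and memory above %sp is volatile,
   moving sp to fp + 64 first keeps the saved frame pointer word below
   the live stack pointer while it is being read. */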
4369 /* If we were deferring a callee register restore, do it now. */
4370 else if (merge_sp_adjust_with_load)
4372 rtx delta = GEN_INT (-actual_fsize);
4373 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4375 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4377 else if (actual_fsize != 0)
4378 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4379 - actual_fsize, 0);
4381 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4382 frame greater than 8k), do so now. */
4383 if (ret_off != 0)
4384 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4386 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4388 rtx sa = EH_RETURN_STACKADJ_RTX;
4390 emit_insn (gen_blockage ());
4391 emit_insn (TARGET_64BIT
4392 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4393 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4397 bool
4398 pa_can_use_return_insn (void)
4400 if (!reload_completed)
4401 return false;
4403 if (frame_pointer_needed)
4404 return false;
4406 if (df_regs_ever_live_p (2))
4407 return false;
4409 if (crtl->profile)
4410 return false;
4412 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4415 rtx
4416 hppa_pic_save_rtx (void)
4418 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4421 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4422 #define NO_DEFERRED_PROFILE_COUNTERS 0
4423 #endif
4426 /* Vector of funcdef numbers. */
4427 static vec<int> funcdef_nos;
4429 /* Output deferred profile counters. */
4430 static void
4431 output_deferred_profile_counters (void)
4433 unsigned int i;
4434 int align, n;
4436 if (funcdef_nos.is_empty ())
4437 return;
4439 switch_to_section (data_section);
4440 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4441 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4443 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4445 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4446 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4449 funcdef_nos.release ();
4452 void
4453 hppa_profile_hook (int label_no)
4455 /* We use SImode for the address of the function in both 32 and
4456 64-bit code to avoid having to provide DImode versions of the
4457 lcla2 and load_offset_label_address insn patterns. */
4458 rtx reg = gen_reg_rtx (SImode);
4459 rtx label_rtx = gen_label_rtx ();
4460 rtx begin_label_rtx, call_insn;
4461 char begin_label_name[16];
4463 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4464 label_no);
4465 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4467 if (TARGET_64BIT)
4468 emit_move_insn (arg_pointer_rtx,
4469 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4470 GEN_INT (64)));
4472 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4474 /* The address of the function is loaded into %r25 with an instruction-
4475 relative sequence that avoids the use of relocations. The sequence
4476 is split so that the load_offset_label_address instruction can
4477 occupy the delay slot of the call to _mcount. */
4478 if (TARGET_PA_20)
4479 emit_insn (gen_lcla2 (reg, label_rtx));
4480 else
4481 emit_insn (gen_lcla1 (reg, label_rtx));
4483 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4484 reg, begin_label_rtx, label_rtx));
4486 #if !NO_DEFERRED_PROFILE_COUNTERS
4488 rtx count_label_rtx, addr, r24;
4489 char count_label_name[16];
4491 funcdef_nos.safe_push (label_no);
4492 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4493 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4495 addr = force_reg (Pmode, count_label_rtx);
4496 r24 = gen_rtx_REG (Pmode, 24);
4497 emit_move_insn (r24, addr);
4499 call_insn =
4500 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4501 gen_rtx_SYMBOL_REF (Pmode,
4502 "_mcount")),
4503 GEN_INT (TARGET_64BIT ? 24 : 12)));
4505 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4507 #else
4509 call_insn =
4510 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4511 gen_rtx_SYMBOL_REF (Pmode,
4512 "_mcount")),
4513 GEN_INT (TARGET_64BIT ? 16 : 8)));
4515 #endif
4517 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4518 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4520 /* Indicate the _mcount call cannot throw, nor will it execute a
4521 non-local goto. */
4522 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4525 /* Fetch the return address for the frame COUNT steps up from
4526 the current frame, after the prologue. FRAMEADDR is the
4527 frame pointer of the COUNT frame.
4529 We want to ignore any export stub remnants here. To handle this,
4530 we examine the code at the return address, and if it is an export
4531 stub, we return a memory rtx for the stub return address stored
4532 at frame-24.
4534 The value returned is used in two different ways:
4536 1. To find a function's caller.
4538 2. To change the return address for a function.
4540 This function handles most instances of case 1; however, it will
4541 fail if there are two levels of stubs to execute on the return
4542 path. The only way I believe that can happen is if the return value
4543 needs a parameter relocation, which never happens for C code.
4545 This function handles most instances of case 2; however, it will
4546 fail if we did not originally have stub code on the return path
4547 but will need stub code on the new return path. This can happen if
4548 the caller & callee are both in the main program, but the new
4549 return location is in a shared library. */
4551 rtx
4552 pa_return_addr_rtx (int count, rtx frameaddr)
4554 rtx label;
4555 rtx rp;
4556 rtx saved_rp;
4557 rtx ins;
4559 /* The instruction stream at the return address of a PA1.X export stub is:
4561 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4562 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4563 0x00011820 | stub+16: mtsp r1,sr0
4564 0xe0400002 | stub+20: be,n 0(sr0,rp)
4566 0xe0400002 must be specified as -532676606 so that it won't be
4567 rejected as an invalid immediate operand on 64-bit hosts.
4569 The instruction stream at the return address of a PA2.0 export stub is:
4571 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4572 0xe840d002 | stub+12: bve,n (rp)
4575 HOST_WIDE_INT insns[4];
4576 int i, len;
4578 if (count != 0)
4579 return NULL_RTX;
4581 rp = get_hard_reg_initial_val (Pmode, 2);
4583 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4584 return rp;
4586 /* If there is no export stub then just use the value saved from
4587 the return pointer register. */
4589 saved_rp = gen_reg_rtx (Pmode);
4590 emit_move_insn (saved_rp, rp);
4592 /* Get pointer to the instruction stream. We have to mask out the
4593 privilege level from the two low order bits of the return address
4594 pointer here so that ins will point to the start of the first
4595 instruction that would have been executed if we returned. */
4596 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4597 label = gen_label_rtx ();
4599 if (TARGET_PA_20)
4601 insns[0] = 0x4bc23fd1;
4602 insns[1] = -398405630;
4603 len = 2;
4605 else
4607 insns[0] = 0x4bc23fd1;
4608 insns[1] = 0x004010a1;
4609 insns[2] = 0x00011820;
4610 insns[3] = -532676606;
4611 len = 4;
4614 /* Check the instruction stream at the normal return address for the
4615 export stub. If it is an export stub, then our return address is
4616 really in -24[frameaddr]. */
4618 for (i = 0; i < len; i++)
4620 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4621 rtx op1 = GEN_INT (insns[i]);
4622 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4625 /* Here we know that our return address points to an export
4626 stub. We don't want to return the address of the export stub,
4627 but rather the return address of the export stub. That return
4628 address is stored at -24[frameaddr]. */
4630 emit_move_insn (saved_rp,
4631 gen_rtx_MEM (Pmode,
4632 memory_address (Pmode,
4633 plus_constant (Pmode, frameaddr,
4634 -24))));
4636 emit_label (label);
4638 return saved_rp;
4641 void
4642 pa_emit_bcond_fp (rtx operands[])
4644 enum rtx_code code = GET_CODE (operands[0]);
4645 rtx operand0 = operands[1];
4646 rtx operand1 = operands[2];
4647 rtx label = operands[3];
4649 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4650 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4652 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4653 gen_rtx_IF_THEN_ELSE (VOIDmode,
4654 gen_rtx_fmt_ee (NE,
4655 VOIDmode,
4656 gen_rtx_REG (CCFPmode, 0),
4657 const0_rtx),
4658 gen_rtx_LABEL_REF (VOIDmode, label),
4659 pc_rtx)));
4663 /* Adjust the cost of a scheduling dependency. Return the new cost of
4664 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4666 static int
4667 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4669 enum attr_type attr_type;
4671 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4672 true dependencies as they are described with bypasses now. */
4673 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4674 return cost;
4676 if (! recog_memoized (insn))
4677 return 0;
4679 attr_type = get_attr_type (insn);
4681 switch (REG_NOTE_KIND (link))
4683 case REG_DEP_ANTI:
4684 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4685 cycles later. */
4687 if (attr_type == TYPE_FPLOAD)
4689 rtx pat = PATTERN (insn);
4690 rtx dep_pat = PATTERN (dep_insn);
4691 if (GET_CODE (pat) == PARALLEL)
4693 /* This happens for the fldXs,mb patterns. */
4694 pat = XVECEXP (pat, 0, 0);
4696 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4697 /* If this happens, we have to extend this to schedule
4698 optimally. Return 0 for now. */
4699 return 0;
4701 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4703 if (! recog_memoized (dep_insn))
4704 return 0;
4705 switch (get_attr_type (dep_insn))
4707 case TYPE_FPALU:
4708 case TYPE_FPMULSGL:
4709 case TYPE_FPMULDBL:
4710 case TYPE_FPDIVSGL:
4711 case TYPE_FPDIVDBL:
4712 case TYPE_FPSQRTSGL:
4713 case TYPE_FPSQRTDBL:
4714 /* A fpload can't be issued until one cycle before a
4715 preceding arithmetic operation has finished if
4716 the target of the fpload is any of the sources
4717 (or destination) of the arithmetic operation. */
4718 return insn_default_latency (dep_insn) - 1;
4720 default:
4721 return 0;
4725 else if (attr_type == TYPE_FPALU)
4727 rtx pat = PATTERN (insn);
4728 rtx dep_pat = PATTERN (dep_insn);
4729 if (GET_CODE (pat) == PARALLEL)
4731 /* This happens for the fldXs,mb patterns. */
4732 pat = XVECEXP (pat, 0, 0);
4734 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4735 /* If this happens, we have to extend this to schedule
4736 optimally. Return 0 for now. */
4737 return 0;
4739 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4741 if (! recog_memoized (dep_insn))
4742 return 0;
4743 switch (get_attr_type (dep_insn))
4745 case TYPE_FPDIVSGL:
4746 case TYPE_FPDIVDBL:
4747 case TYPE_FPSQRTSGL:
4748 case TYPE_FPSQRTDBL:
4749 /* An ALU flop can't be issued until two cycles before a
4750 preceding divide or sqrt operation has finished if
4751 the target of the ALU flop is any of the sources
4752 (or destination) of the divide or sqrt operation. */
4753 return insn_default_latency (dep_insn) - 2;
4755 default:
4756 return 0;
4761 /* For other anti dependencies, the cost is 0. */
4762 return 0;
4764 case REG_DEP_OUTPUT:
4765 /* Output dependency; DEP_INSN writes a register that INSN writes some
4766 cycles later. */
4767 if (attr_type == TYPE_FPLOAD)
4769 rtx pat = PATTERN (insn);
4770 rtx dep_pat = PATTERN (dep_insn);
4771 if (GET_CODE (pat) == PARALLEL)
4773 /* This happens for the fldXs,mb patterns. */
4774 pat = XVECEXP (pat, 0, 0);
4776 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4777 /* If this happens, we have to extend this to schedule
4778 optimally. Return 0 for now. */
4779 return 0;
4781 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4783 if (! recog_memoized (dep_insn))
4784 return 0;
4785 switch (get_attr_type (dep_insn))
4787 case TYPE_FPALU:
4788 case TYPE_FPMULSGL:
4789 case TYPE_FPMULDBL:
4790 case TYPE_FPDIVSGL:
4791 case TYPE_FPDIVDBL:
4792 case TYPE_FPSQRTSGL:
4793 case TYPE_FPSQRTDBL:
4794 /* A fpload can't be issued until one cycle before a
4795 preceding arithmetic operation has finished if
4796 the target of the fpload is the destination of the
4797 arithmetic operation.
4799 Exception: For PA7100LC, PA7200 and PA7300, the cost
4800 is 3 cycles, unless they bundle together. We also
4801 pay the penalty if the second insn is a fpload. */
4802 return insn_default_latency (dep_insn) - 1;
4804 default:
4805 return 0;
4809 else if (attr_type == TYPE_FPALU)
4811 rtx pat = PATTERN (insn);
4812 rtx dep_pat = PATTERN (dep_insn);
4813 if (GET_CODE (pat) == PARALLEL)
4815 /* This happens for the fldXs,mb patterns. */
4816 pat = XVECEXP (pat, 0, 0);
4818 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4819 /* If this happens, we have to extend this to schedule
4820 optimally. Return 0 for now. */
4821 return 0;
4823 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4825 if (! recog_memoized (dep_insn))
4826 return 0;
4827 switch (get_attr_type (dep_insn))
4829 case TYPE_FPDIVSGL:
4830 case TYPE_FPDIVDBL:
4831 case TYPE_FPSQRTSGL:
4832 case TYPE_FPSQRTDBL:
4833 /* An ALU flop can't be issued until two cycles before a
4834 preceding divide or sqrt operation has finished if
4835 the target of the ALU flop is also the target of
4836 the divide or sqrt operation. */
4837 return insn_default_latency (dep_insn) - 2;
4839 default:
4840 return 0;
4845 /* For other output dependencies, the cost is 0. */
4846 return 0;
4848 default:
4849 gcc_unreachable ();
4853 /* Adjust scheduling priorities. We use this to try to keep addil
4854 and the next use of %r1 close together. */
4855 static int
4856 pa_adjust_priority (rtx insn, int priority)
4858 rtx set = single_set (insn);
4859 rtx src, dest;
4860 if (set)
4862 src = SET_SRC (set);
4863 dest = SET_DEST (set);
4864 if (GET_CODE (src) == LO_SUM
4865 && symbolic_operand (XEXP (src, 1), VOIDmode)
4866 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4867 priority >>= 3;
4869 else if (GET_CODE (src) == MEM
4870 && GET_CODE (XEXP (src, 0)) == LO_SUM
4871 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4872 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4873 priority >>= 1;
4875 else if (GET_CODE (dest) == MEM
4876 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4877 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4878 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4879 priority >>= 3;
4881 return priority;
4884 /* The 700 can only issue a single insn at a time.
4885 The 7XXX processors can issue two insns at a time.
4886 The 8000 can issue 4 insns at a time. */
4887 static int
4888 pa_issue_rate (void)
4890 switch (pa_cpu)
4892 case PROCESSOR_700: return 1;
4893 case PROCESSOR_7100: return 2;
4894 case PROCESSOR_7100LC: return 2;
4895 case PROCESSOR_7200: return 2;
4896 case PROCESSOR_7300: return 2;
4897 case PROCESSOR_8000: return 4;
4899 default:
4900 gcc_unreachable ();
4906 /* Return any length plus adjustment needed by INSN which already has
4907 its length computed as LENGTH. Return LENGTH if no adjustment is
4908 necessary.
4910 Also compute the length of an inline block move here as it is too
4911 complicated to express as a length attribute in pa.md. */
4912 int
4913 pa_adjust_insn_length (rtx insn, int length)
4915 rtx pat = PATTERN (insn);
4917 /* If length is negative or undefined, provide initial length. */
4918 if ((unsigned int) length >= INT_MAX)
4920 if (GET_CODE (pat) == SEQUENCE)
4921 insn = XVECEXP (pat, 0, 0);
4923 switch (get_attr_type (insn))
4925 case TYPE_MILLI:
4926 length = pa_attr_length_millicode_call (insn);
4927 break;
4928 case TYPE_CALL:
4929 length = pa_attr_length_call (insn, 0);
4930 break;
4931 case TYPE_SIBCALL:
4932 length = pa_attr_length_call (insn, 1);
4933 break;
4934 case TYPE_DYNCALL:
4935 length = pa_attr_length_indirect_call (insn);
4936 break;
4937 case TYPE_SH_FUNC_ADRS:
4938 length = pa_attr_length_millicode_call (insn) + 20;
4939 break;
4940 default:
4941 gcc_unreachable ();
4945 /* Block move pattern. */
4946 if (NONJUMP_INSN_P (insn)
4947 && GET_CODE (pat) == PARALLEL
4948 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4949 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4950 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4951 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4952 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4953 length += compute_movmem_length (insn) - 4;
4954 /* Block clear pattern. */
4955 else if (NONJUMP_INSN_P (insn)
4956 && GET_CODE (pat) == PARALLEL
4957 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4958 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4959 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4960 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4961 length += compute_clrmem_length (insn) - 4;
4962 /* Conditional branch with an unfilled delay slot. */
4963 else if (JUMP_P (insn) && ! simplejump_p (insn))
4965 /* Adjust a short backwards conditional with an unfilled delay slot. */
4966 if (GET_CODE (pat) == SET
4967 && length == 4
4968 && JUMP_LABEL (insn) != NULL_RTX
4969 && ! forward_branch_p (insn))
4970 length += 4;
4971 else if (GET_CODE (pat) == PARALLEL
4972 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4973 && length == 4)
4974 length += 4;
4975 /* Adjust dbra insn with short backwards conditional branch with
4976 unfilled delay slot -- only for case where counter is in a
4977 general register. */
4978 else if (GET_CODE (pat) == PARALLEL
4979 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4980 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4981 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4982 && length == 4
4983 && ! forward_branch_p (insn))
4984 length += 4;
4986 return length;
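/* For instance, a short backward conditional branch with an unfilled
   delay slot arrives here with length 4 and leaves with length 8; the
   extra word accounts for the unfilled slot. */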
4989 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4991 static bool
4992 pa_print_operand_punct_valid_p (unsigned char code)
4994 if (code == '@'
4995 || code == '#'
4996 || code == '*'
4997 || code == '^')
4998 return true;
5000 return false;
5003 /* Print operand X (an rtx) in assembler syntax to file FILE.
5004 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5005 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5007 void
5008 pa_print_operand (FILE *file, rtx x, int code)
5010 switch (code)
5012 case '#':
5013 /* Output a 'nop' if there's nothing for the delay slot. */
5014 if (dbr_sequence_length () == 0)
5015 fputs ("\n\tnop", file);
5016 return;
5017 case '*':
5018 /* Output a nullification completer if there's nothing for the
5019 delay slot or nullification is requested. */
5020 if (dbr_sequence_length () == 0 ||
5021 (final_sequence &&
5022 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5023 fputs (",n", file);
5024 return;
5025 case 'R':
5026 /* Print out the second register name of a register pair.
5027 I.e., R (6) => 7. */
5028 fputs (reg_names[REGNO (x) + 1], file);
5029 return;
5030 case 'r':
5031 /* A register or zero. */
5032 if (x == const0_rtx
5033 || (x == CONST0_RTX (DFmode))
5034 || (x == CONST0_RTX (SFmode)))
5036 fputs ("%r0", file);
5037 return;
5039 else
5040 break;
5041 case 'f':
5042 /* A register or zero (floating point). */
5043 if (x == const0_rtx
5044 || (x == CONST0_RTX (DFmode))
5045 || (x == CONST0_RTX (SFmode)))
5047 fputs ("%fr0", file);
5048 return;
5050 else
5051 break;
5052 case 'A':
5054 rtx xoperands[2];
5056 xoperands[0] = XEXP (XEXP (x, 0), 0);
5057 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5058 pa_output_global_address (file, xoperands[1], 0);
5059 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5060 return;
5063 case 'C': /* Plain (C)ondition */
5064 case 'X':
5065 switch (GET_CODE (x))
5067 case EQ:
5068 fputs ("=", file); break;
5069 case NE:
5070 fputs ("<>", file); break;
5071 case GT:
5072 fputs (">", file); break;
5073 case GE:
5074 fputs (">=", file); break;
5075 case GEU:
5076 fputs (">>=", file); break;
5077 case GTU:
5078 fputs (">>", file); break;
5079 case LT:
5080 fputs ("<", file); break;
5081 case LE:
5082 fputs ("<=", file); break;
5083 case LEU:
5084 fputs ("<<=", file); break;
5085 case LTU:
5086 fputs ("<<", file); break;
5087 default:
5088 gcc_unreachable ();
5090 return;
5091 case 'N': /* Condition, (N)egated */
5092 switch (GET_CODE (x))
5094 case EQ:
5095 fputs ("<>", file); break;
5096 case NE:
5097 fputs ("=", file); break;
5098 case GT:
5099 fputs ("<=", file); break;
5100 case GE:
5101 fputs ("<", file); break;
5102 case GEU:
5103 fputs ("<<", file); break;
5104 case GTU:
5105 fputs ("<<=", file); break;
5106 case LT:
5107 fputs (">=", file); break;
5108 case LE:
5109 fputs (">", file); break;
5110 case LEU:
5111 fputs (">>", file); break;
5112 case LTU:
5113 fputs (">>=", file); break;
5114 default:
5115 gcc_unreachable ();
5117 return;
5118 /* For floating point comparisons. Note that the output
5119 predicates are the complement of the desired mode. The
5120 conditions for GT, GE, LT, LE and LTGT cause an invalid
5121 operation exception if the result is unordered and this
5122 exception is enabled in the floating-point status register. */
5123 case 'Y':
5124 switch (GET_CODE (x))
5126 case EQ:
5127 fputs ("!=", file); break;
5128 case NE:
5129 fputs ("=", file); break;
5130 case GT:
5131 fputs ("!>", file); break;
5132 case GE:
5133 fputs ("!>=", file); break;
5134 case LT:
5135 fputs ("!<", file); break;
5136 case LE:
5137 fputs ("!<=", file); break;
5138 case LTGT:
5139 fputs ("!<>", file); break;
5140 case UNLE:
5141 fputs ("!?<=", file); break;
5142 case UNLT:
5143 fputs ("!?<", file); break;
5144 case UNGE:
5145 fputs ("!?>=", file); break;
5146 case UNGT:
5147 fputs ("!?>", file); break;
5148 case UNEQ:
5149 fputs ("!?=", file); break;
5150 case UNORDERED:
5151 fputs ("!?", file); break;
5152 case ORDERED:
5153 fputs ("?", file); break;
5154 default:
5155 gcc_unreachable ();
5157 return;
5158 case 'S': /* Condition, operands are (S)wapped. */
5159 switch (GET_CODE (x))
5161 case EQ:
5162 fputs ("=", file); break;
5163 case NE:
5164 fputs ("<>", file); break;
5165 case GT:
5166 fputs ("<", file); break;
5167 case GE:
5168 fputs ("<=", file); break;
5169 case GEU:
5170 fputs ("<<=", file); break;
5171 case GTU:
5172 fputs ("<<", file); break;
5173 case LT:
5174 fputs (">", file); break;
5175 case LE:
5176 fputs (">=", file); break;
5177 case LEU:
5178 fputs (">>=", file); break;
5179 case LTU:
5180 fputs (">>", file); break;
5181 default:
5182 gcc_unreachable ();
5184 return;
5185 case 'B': /* Condition, (B)oth swapped and negate. */
5186 switch (GET_CODE (x))
5188 case EQ:
5189 fputs ("<>", file); break;
5190 case NE:
5191 fputs ("=", file); break;
5192 case GT:
5193 fputs (">=", file); break;
5194 case GE:
5195 fputs (">", file); break;
5196 case GEU:
5197 fputs (">>", file); break;
5198 case GTU:
5199 fputs (">>=", file); break;
5200 case LT:
5201 fputs ("<=", file); break;
5202 case LE:
5203 fputs ("<", file); break;
5204 case LEU:
5205 fputs ("<<", file); break;
5206 case LTU:
5207 fputs ("<<=", file); break;
5208 default:
5209 gcc_unreachable ();
5211 return;
5212 case 'k':
5213 gcc_assert (GET_CODE (x) == CONST_INT);
5214 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5215 return;
5216 case 'Q':
5217 gcc_assert (GET_CODE (x) == CONST_INT);
5218 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5219 return;
5220 case 'L':
5221 gcc_assert (GET_CODE (x) == CONST_INT);
5222 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5223 return;
5224 case 'O':
5225 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5226 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5227 return;
5228 case 'p':
5229 gcc_assert (GET_CODE (x) == CONST_INT);
5230 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5231 return;
5232 case 'P':
5233 gcc_assert (GET_CODE (x) == CONST_INT);
5234 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5235 return;
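/* A few worked examples for the numeric codes above (illustrative):
   with INTVAL (x) == 5, '%Q' prints 59 (64 - 5), '%L' prints 27
   (32 - 5), '%p' prints 58 (63 - 5) and '%P' prints 26 (31 - 5);
   '%k' prints the bitwise complement and '%O' the base-2 log of a
   power of two (e.g. 8 -> 3). */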
5236 case 'I':
5237 if (GET_CODE (x) == CONST_INT)
5238 fputs ("i", file);
5239 return;
5240 case 'M':
5241 case 'F':
5242 switch (GET_CODE (XEXP (x, 0)))
5244 case PRE_DEC:
5245 case PRE_INC:
5246 if (ASSEMBLER_DIALECT == 0)
5247 fputs ("s,mb", file);
5248 else
5249 fputs (",mb", file);
5250 break;
5251 case POST_DEC:
5252 case POST_INC:
5253 if (ASSEMBLER_DIALECT == 0)
5254 fputs ("s,ma", file);
5255 else
5256 fputs (",ma", file);
5257 break;
5258 case PLUS:
5259 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5260 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5262 if (ASSEMBLER_DIALECT == 0)
5263 fputs ("x", file);
5265 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5266 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5268 if (ASSEMBLER_DIALECT == 0)
5269 fputs ("x,s", file);
5270 else
5271 fputs (",s", file);
5273 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5274 fputs ("s", file);
5275 break;
5276 default:
5277 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5278 fputs ("s", file);
5279 break;
5281 return;
5282 case 'G':
5283 pa_output_global_address (file, x, 0);
5284 return;
5285 case 'H':
5286 pa_output_global_address (file, x, 1);
5287 return;
5288 case 0: /* Don't do anything special */
5289 break;
5290 case 'Z':
5292 unsigned op[3];
5293 compute_zdepwi_operands (INTVAL (x), op);
5294 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5295 return;
5297 case 'z':
5299 unsigned op[3];
5300 compute_zdepdi_operands (INTVAL (x), op);
5301 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5302 return;
5304 case 'c':
5305 /* We can get here from a .vtable_inherit due to our
5306 CONSTANT_ADDRESS_P rejecting perfectly good constant
5307 addresses. */
5308 break;
5309 default:
5310 gcc_unreachable ();
5312 if (GET_CODE (x) == REG)
5314 fputs (reg_names [REGNO (x)], file);
5315 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5317 fputs ("R", file);
5318 return;
5320 if (FP_REG_P (x)
5321 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5322 && (REGNO (x) & 1) == 0)
5323 fputs ("L", file);
5325 else if (GET_CODE (x) == MEM)
5327 int size = GET_MODE_SIZE (GET_MODE (x));
5328 rtx base = NULL_RTX;
5329 switch (GET_CODE (XEXP (x, 0)))
5331 case PRE_DEC:
5332 case POST_DEC:
5333 base = XEXP (XEXP (x, 0), 0);
5334 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5335 break;
5336 case PRE_INC:
5337 case POST_INC:
5338 base = XEXP (XEXP (x, 0), 0);
5339 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5340 break;
5341 case PLUS:
5342 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5343 fprintf (file, "%s(%s)",
5344 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5345 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5346 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5347 fprintf (file, "%s(%s)",
5348 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5349 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5350 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5351 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5353 /* Because the REG_POINTER flag can get lost during reload,
5354 pa_legitimate_address_p canonicalizes the order of the
5355 index and base registers in the combined move patterns. */
5356 rtx base = XEXP (XEXP (x, 0), 1);
5357 rtx index = XEXP (XEXP (x, 0), 0);
5359 fprintf (file, "%s(%s)",
5360 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5362 else
5363 output_address (XEXP (x, 0));
5364 break;
5365 default:
5366 output_address (XEXP (x, 0));
5367 break;
5370 else
5371 output_addr_const (file, x);
5374 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5376 void
5377 pa_output_global_address (FILE *file, rtx x, int round_constant)
5380 /* Imagine (high (const (plus ...))). */
5381 if (GET_CODE (x) == HIGH)
5382 x = XEXP (x, 0);
5384 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5385 output_addr_const (file, x);
5386 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5388 output_addr_const (file, x);
5389 fputs ("-$global$", file);
5391 else if (GET_CODE (x) == CONST)
5393 const char *sep = "";
5394 int offset = 0; /* assembler wants -$global$ at end */
5395 rtx base = NULL_RTX;
5397 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5399 case SYMBOL_REF:
5400 base = XEXP (XEXP (x, 0), 0);
5401 output_addr_const (file, base);
5402 break;
5403 case CONST_INT:
5404 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5405 break;
5406 default:
5407 gcc_unreachable ();
5410 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5412 case SYMBOL_REF:
5413 base = XEXP (XEXP (x, 0), 1);
5414 output_addr_const (file, base);
5415 break;
5416 case CONST_INT:
5417 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5418 break;
5419 default:
5420 gcc_unreachable ();
5423 /* How bogus. The compiler is apparently responsible for
5424 rounding the constant if it uses an LR field selector.
5426 The linker and/or assembler seem a better place since
5427 they have to do this kind of thing already.
5429 If we fail to do this, HP's optimizing linker may eliminate
5430 an addil, but not update the ldw/stw/ldo instruction that
5431 uses the result of the addil. */
5432 if (round_constant)
5433 offset = ((offset + 0x1000) & ~0x1fff);
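/* E.g. (illustrative): an offset of 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff == 0x2000, i.e. the offset is rounded
   to the nearest multiple of 8k as the LR field selector expects. */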
5435 switch (GET_CODE (XEXP (x, 0)))
5437 case PLUS:
5438 if (offset < 0)
5440 offset = -offset;
5441 sep = "-";
5443 else
5444 sep = "+";
5445 break;
5447 case MINUS:
5448 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5449 sep = "-";
5450 break;
5452 default:
5453 gcc_unreachable ();
5456 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5457 fputs ("-$global$", file);
5458 if (offset)
5459 fprintf (file, "%s%d", sep, offset);
5461 else
5462 output_addr_const (file, x);
5465 /* Output boilerplate text to appear at the beginning of the file.
5466 There are several possible versions. */
5467 #define aputs(x) fputs(x, asm_out_file)
5468 static inline void
5469 pa_file_start_level (void)
5471 if (TARGET_64BIT)
5472 aputs ("\t.LEVEL 2.0w\n");
5473 else if (TARGET_PA_20)
5474 aputs ("\t.LEVEL 2.0\n");
5475 else if (TARGET_PA_11)
5476 aputs ("\t.LEVEL 1.1\n");
5477 else
5478 aputs ("\t.LEVEL 1.0\n");
5481 static inline void
5482 pa_file_start_space (int sortspace)
5484 aputs ("\t.SPACE $PRIVATE$");
5485 if (sortspace)
5486 aputs (",SORT=16");
5487 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5488 if (flag_tm)
5489 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5490 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5491 "\n\t.SPACE $TEXT$");
5492 if (sortspace)
5493 aputs (",SORT=8");
5494 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5495 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5498 static inline void
5499 pa_file_start_file (int want_version)
5501 if (write_symbols != NO_DEBUG)
5503 output_file_directive (asm_out_file, main_input_filename);
5504 if (want_version)
5505 aputs ("\t.version\t\"01.01\"\n");
5509 static inline void
5510 pa_file_start_mcount (const char *aswhat)
5512 if (profile_flag)
5513 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5516 static void
5517 pa_elf_file_start (void)
5519 pa_file_start_level ();
5520 pa_file_start_mcount ("ENTRY");
5521 pa_file_start_file (0);
5524 static void
5525 pa_som_file_start (void)
5527 pa_file_start_level ();
5528 pa_file_start_space (0);
5529 aputs ("\t.IMPORT $global$,DATA\n"
5530 "\t.IMPORT $$dyncall,MILLICODE\n");
5531 pa_file_start_mcount ("CODE");
5532 pa_file_start_file (0);
5535 static void
5536 pa_linux_file_start (void)
5538 pa_file_start_file (1);
5539 pa_file_start_level ();
5540 pa_file_start_mcount ("CODE");
5543 static void
5544 pa_hpux64_gas_file_start (void)
5546 pa_file_start_level ();
5547 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5548 if (profile_flag)
5549 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5550 #endif
5551 pa_file_start_file (1);
5554 static void
5555 pa_hpux64_hpas_file_start (void)
5557 pa_file_start_level ();
5558 pa_file_start_space (1);
5559 pa_file_start_mcount ("CODE");
5560 pa_file_start_file (0);
5562 #undef aputs
5564 /* Search the deferred plabel list for SYMBOL and return its internal
5565 label. If an entry for SYMBOL is not found, a new entry is created. */
5567 rtx
5568 pa_get_deferred_plabel (rtx symbol)
5570 const char *fname = XSTR (symbol, 0);
5571 size_t i;
5573 /* See if we have already put this function on the list of deferred
5574 plabels. This list is generally small, so a linear search is not
5575 too ugly. If it proves too slow, replace it with something faster. */
5576 for (i = 0; i < n_deferred_plabels; i++)
5577 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5578 break;
5580 /* If the deferred plabel list is empty, or this entry was not found
5581 on the list, create a new entry on the list. */
5582 if (deferred_plabels == NULL || i == n_deferred_plabels)
5584 tree id;
5586 if (deferred_plabels == 0)
5587 deferred_plabels = ggc_alloc<deferred_plabel> ();
5588 else
5589 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5590 deferred_plabels,
5591 n_deferred_plabels + 1);
5593 i = n_deferred_plabels++;
5594 deferred_plabels[i].internal_label = gen_label_rtx ();
5595 deferred_plabels[i].symbol = symbol;
5597 /* Gross. We have just implicitly taken the address of this
5598 function. Mark it in the same manner as assemble_name. */
5599 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5600 if (id)
5601 mark_referenced (id);
5604 return deferred_plabels[i].internal_label;
5607 static void
5608 output_deferred_plabels (void)
5610 size_t i;
5612 /* If we have some deferred plabels, then we need to switch into the
5613 data or readonly data section, and align it to a 4-byte boundary
5614 (8 bytes for the 64-bit runtime) before outputting the deferred plabels. */
5615 if (n_deferred_plabels)
5617 switch_to_section (flag_pic ? data_section : readonly_data_section);
5618 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5621 /* Now output the deferred plabels. */
5622 for (i = 0; i < n_deferred_plabels; i++)
5624 targetm.asm_out.internal_label (asm_out_file, "L",
5625 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5626 assemble_integer (deferred_plabels[i].symbol,
5627 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5631 /* Initialize optabs to point to emulation routines. */
5633 static void
5634 pa_init_libfuncs (void)
5636 if (HPUX_LONG_DOUBLE_LIBRARY)
5638 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5639 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5640 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5641 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5642 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5643 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5644 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5645 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5646 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5648 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5649 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5650 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5651 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5652 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5653 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5654 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5656 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5657 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5658 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5659 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5661 set_conv_libfunc (sfix_optab, SImode, TFmode,
5662 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5663 : "_U_Qfcnvfxt_quad_to_sgl");
5664 set_conv_libfunc (sfix_optab, DImode, TFmode,
5665 "_U_Qfcnvfxt_quad_to_dbl");
5666 set_conv_libfunc (ufix_optab, SImode, TFmode,
5667 "_U_Qfcnvfxt_quad_to_usgl");
5668 set_conv_libfunc (ufix_optab, DImode, TFmode,
5669 "_U_Qfcnvfxt_quad_to_udbl");
5671 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5672 "_U_Qfcnvxf_sgl_to_quad");
5673 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5674 "_U_Qfcnvxf_dbl_to_quad");
5675 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5676 "_U_Qfcnvxf_usgl_to_quad");
5677 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5678 "_U_Qfcnvxf_udbl_to_quad");
5681 if (TARGET_SYNC_LIBCALL)
5682 init_sync_libfuncs (UNITS_PER_WORD);
5685 /* HP's millicode routines mean something special to the assembler.
5686 Keep track of which ones we have used. */
5688 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5689 static void import_milli (enum millicodes);
5690 static char imported[(int) end1000];
5691 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5692 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5693 #define MILLI_START 10
5695 static void
5696 import_milli (enum millicodes code)
5698 char str[sizeof (import_string)];
5700 if (!imported[(int) code])
5702 imported[(int) code] = 1;
5703 strcpy (str, import_string);
5704 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5705 output_asm_insn (str, 0);
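/* For instance, the first multiply millicode call in a file triggers
   import_milli (mulI), which substitutes the name into the template
   and emits

       .IMPORT $$mulI,MILLICODE

   Subsequent calls find imported[mulI] set and emit nothing. */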
5709 /* The register constraints have put the operands and return value in
5710 the proper registers. */
5712 const char *
5713 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5715 import_milli (mulI);
5716 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5719 /* Emit the rtl for doing a division by a constant. */
5721 /* Do magic division millicodes exist for this value? */
5722 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5724 /* We'll use an array to keep track of the magic millicodes and
5725 whether or not we've used them already. [n][0] is signed, [n][1] is
5726 unsigned. */
5728 static int div_milli[16][2];
5730 int
5731 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5733 if (GET_CODE (operands[2]) == CONST_INT
5734 && INTVAL (operands[2]) > 0
5735 && INTVAL (operands[2]) < 16
5736 && pa_magic_milli[INTVAL (operands[2])])
5738 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5740 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5741 emit
5742 (gen_rtx_PARALLEL
5743 (VOIDmode,
5744 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5745 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5746 SImode,
5747 gen_rtx_REG (SImode, 26),
5748 operands[2])),
5749 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5750 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5751 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5752 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5753 gen_rtx_CLOBBER (VOIDmode, ret))));
5754 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5755 return 1;
5757 return 0;
5760 const char *
5761 pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
5763 int divisor;
5765 /* If the divisor is a constant, try to use one of the special
5766 opcodes.  */
5767 if (GET_CODE (operands[0]) == CONST_INT)
5769 static char buf[100];
5770 divisor = INTVAL (operands[0]);
5771 if (!div_milli[divisor][unsignedp])
5773 div_milli[divisor][unsignedp] = 1;
5774 if (unsignedp)
5775 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5776 else
5777 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5779 if (unsignedp)
5781 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5782 INTVAL (operands[0]));
5783 return pa_output_millicode_call (insn,
5784 gen_rtx_SYMBOL_REF (SImode, buf));
5786 else
5788 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5789 INTVAL (operands[0]));
5790 return pa_output_millicode_call (insn,
5791 gen_rtx_SYMBOL_REF (SImode, buf));
5794 /* Divisor isn't a special constant. */
5795 else
5797 if (unsignedp)
5799 import_milli (divU);
5800 return pa_output_millicode_call (insn,
5801 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5803 else
5805 import_milli (divI);
5806 return pa_output_millicode_call (insn,
5807 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
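/* For illustration only -- a sketch (not part of pa.c) of how the
   per-divisor millicode symbol names used above are formed; "%ld"
   stands in for HOST_WIDE_INT_PRINT_DEC, and the combined "%c" format
   is a simplification of the two separate sprintf calls.  */
#include <stdio.h>

static void
print_div_milli_name (int unsignedp, long divisor)
{
  char buf[100];

  sprintf (buf, "$$div%c_%ld", unsignedp ? 'U' : 'I', divisor);
  puts (buf);	/* e.g. "$$divU_3" or "$$divI_15" */
}

int
main (void)
{
  print_div_milli_name (1, 3);
  print_div_milli_name (0, 15);
  return 0;
}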
5812 /* Output a $$rem millicode to do mod. */
5814 const char *
5815 pa_output_mod_insn (int unsignedp, rtx insn)
5817 if (unsignedp)
5819 import_milli (remU);
5820 return pa_output_millicode_call (insn,
5821 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5823 else
5825 import_milli (remI);
5826 return pa_output_millicode_call (insn,
5827 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5831 void
5832 pa_output_arg_descriptor (rtx call_insn)
5834 const char *arg_regs[4];
5835 enum machine_mode arg_mode;
5836 rtx link;
5837 int i, output_flag = 0;
5838 int regno;
5840 /* We neither need nor want argument location descriptors for the
5841 64-bit runtime environment or the ELF32 environment. */
5842 if (TARGET_64BIT || TARGET_ELF32)
5843 return;
5845 for (i = 0; i < 4; i++)
5846 arg_regs[i] = 0;
5848 /* Specify explicitly that no argument relocations should take place
5849 if using the portable runtime calling conventions. */
5850 if (TARGET_PORTABLE_RUNTIME)
5852 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5853 asm_out_file);
5854 return;
5857 gcc_assert (CALL_P (call_insn));
5858 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5859 link; link = XEXP (link, 1))
5861 rtx use = XEXP (link, 0);
5863 if (! (GET_CODE (use) == USE
5864 && GET_CODE (XEXP (use, 0)) == REG
5865 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5866 continue;
5868 arg_mode = GET_MODE (XEXP (use, 0));
5869 regno = REGNO (XEXP (use, 0));
5870 if (regno >= 23 && regno <= 26)
5872 arg_regs[26 - regno] = "GR";
5873 if (arg_mode == DImode)
5874 arg_regs[25 - regno] = "GR";
5876 else if (regno >= 32 && regno <= 39)
5878 if (arg_mode == SFmode)
5879 arg_regs[(regno - 32) / 2] = "FR";
5880 else
5882 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5883 arg_regs[(regno - 34) / 2] = "FR";
5884 arg_regs[(regno - 34) / 2 + 1] = "FU";
5885 #else
5886 arg_regs[(regno - 34) / 2] = "FU";
5887 arg_regs[(regno - 34) / 2 + 1] = "FR";
5888 #endif
5892 fputs ("\t.CALL ", asm_out_file);
5893 for (i = 0; i < 4; i++)
5895 if (arg_regs[i])
5897 if (output_flag++)
5898 fputc (',', asm_out_file);
5899 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5902 fputc ('\n', asm_out_file);
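/* For illustration only -- a standalone model (not part of pa.c) of
   the slot assignment pa_output_arg_descriptor makes above.  The call
   f (int, int) is a hypothetical example that places its arguments in
   %r26 and %r25; the HP_FP_ARG_DESCRIPTOR_REVERSED case is noted but
   not exercised.  */
#include <stdio.h>

int
main (void)
{
  const char *arg_regs[4] = {0, 0, 0, 0};
  int i, output_flag = 0;

  /* f (int, int): %r26 -> ARGW0, %r25 -> ARGW1.  */
  arg_regs[26 - 26] = "GR";
  arg_regs[26 - 25] = "GR";
  /* A DFmode value in FP register 34 would instead set
     arg_regs[(34 - 34) / 2] = "FR" and the following slot to "FU".  */

  fputs ("\t.CALL ", stdout);
  for (i = 0; i < 4; i++)
    if (arg_regs[i])
      {
	if (output_flag++)
	  fputc (',', stdout);
	printf ("ARGW%d=%s", i, arg_regs[i]);
      }
  fputc ('\n', stdout);	/* prints ".CALL ARGW0=GR,ARGW1=GR" */
  return 0;
}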
5905 /* Inform reload about cases where moving X with a mode MODE to or from
5906 a register in RCLASS requires an extra scratch or immediate register.
5907 Return the class needed for the immediate register. */
5909 static reg_class_t
5910 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5911 enum machine_mode mode, secondary_reload_info *sri)
5913 int regno;
5914 enum reg_class rclass = (enum reg_class) rclass_i;
5916 /* Handle the easy stuff first. */
5917 if (rclass == R1_REGS)
5918 return NO_REGS;
5920 if (REG_P (x))
5922 regno = REGNO (x);
5923 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5924 return NO_REGS;
5926 else
5927 regno = -1;
5929 /* If we have something like (mem (mem (...))), we can safely assume the
5930 inner MEM will end up in a general register after reloading, so there's
5931 no need for a secondary reload. */
5932 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5933 return NO_REGS;
5935 /* Trying to load a constant into a FP register during PIC code
5936 generation requires %r1 as a scratch register. For float modes,
5937 the only legitimate constant is CONST0_RTX. However, there are
5938 a few patterns that accept constant double operands. */
5939 if (flag_pic
5940 && FP_REG_CLASS_P (rclass)
5941 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5943 switch (mode)
5945 case SImode:
5946 sri->icode = CODE_FOR_reload_insi_r1;
5947 break;
5949 case DImode:
5950 sri->icode = CODE_FOR_reload_indi_r1;
5951 break;
5953 case SFmode:
5954 sri->icode = CODE_FOR_reload_insf_r1;
5955 break;
5957 case DFmode:
5958 sri->icode = CODE_FOR_reload_indf_r1;
5959 break;
5961 default:
5962 gcc_unreachable ();
5964 return NO_REGS;
5967 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5968 register when we're generating PIC code or when the operand isn't
5969 readonly. */
5970 if (pa_symbolic_expression_p (x))
5972 if (GET_CODE (x) == HIGH)
5973 x = XEXP (x, 0);
5975 if (flag_pic || !read_only_operand (x, VOIDmode))
5977 switch (mode)
5979 case SImode:
5980 sri->icode = CODE_FOR_reload_insi_r1;
5981 break;
5983 case DImode:
5984 sri->icode = CODE_FOR_reload_indi_r1;
5985 break;
5987 default:
5988 gcc_unreachable ();
5990 return NO_REGS;
5994 /* Profiling showed the PA port spends about 1.3% of its compilation
5995 time in true_regnum from calls inside pa_secondary_reload_class. */
5996 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5997 regno = true_regnum (x);
5999 /* Handle reloads for floating point loads and stores. */
6000 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6001 && FP_REG_CLASS_P (rclass))
6003 if (MEM_P (x))
6005 x = XEXP (x, 0);
6007 /* We don't need an intermediate for indexed and LO_SUM DLT
6008 memory addresses. When INT14_OK_STRICT is true, it might
6009 appear that we could directly allow register indirect
6010 memory addresses. However, this doesn't work because we
6011 don't support SUBREGs in floating-point register copies
6012 and reload doesn't tell us when it's going to use a SUBREG. */
6013 if (IS_INDEX_ADDR_P (x)
6014 || IS_LO_SUM_DLT_ADDR_P (x))
6015 return NO_REGS;
6017 /* Request intermediate general register. */
6018 return GENERAL_REGS;
6021 /* Request a secondary reload with a general scratch register
6022 for everything else. ??? Could symbolic operands be handled
6023 directly when generating non-pic PA 2.0 code? */
6024 sri->icode = (in_p
6025 ? direct_optab_handler (reload_in_optab, mode)
6026 : direct_optab_handler (reload_out_optab, mode));
6027 return NO_REGS;
6030 /* A SAR<->FP register copy requires an intermediate general register
6031 and secondary memory. We need a secondary reload with a general
6032 scratch register for spills. */
6033 if (rclass == SHIFT_REGS)
6035 /* Handle spill. */
6036 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6038 sri->icode = (in_p
6039 ? direct_optab_handler (reload_in_optab, mode)
6040 : direct_optab_handler (reload_out_optab, mode));
6041 return NO_REGS;
6044 /* Handle FP copy. */
6045 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6046 return GENERAL_REGS;
6049 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6050 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6051 && FP_REG_CLASS_P (rclass))
6052 return GENERAL_REGS;
6054 return NO_REGS;
6057 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6058 is only marked as live on entry by df-scan when it is a fixed
6059 register. It isn't a fixed register in the 64-bit runtime,
6060 so we need to mark it here. */
6062 static void
6063 pa_extra_live_on_entry (bitmap regs)
6065 if (TARGET_64BIT)
6066 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6069 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6070 to prevent it from being deleted. */
6072 rtx
6073 pa_eh_return_handler_rtx (void)
6075 rtx tmp;
6077 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6078 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6079 tmp = gen_rtx_MEM (word_mode, tmp);
6080 MEM_VOLATILE_P (tmp) = 1;
6081 return tmp;
6084 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6085 by invisible reference. As a GCC extension, we also pass anything
6086 with a zero or variable size by reference.
6088 The 64-bit runtime does not describe passing any types by invisible
6089 reference. The internals of GCC can't currently handle passing
6090 empty structures, and zero or variable length arrays when they are
6091 not passed entirely on the stack or by reference. Thus, as a GCC
6092 extension, we pass these types by reference. The HP compiler doesn't
6093 support these types, so hopefully there shouldn't be any compatibility
6094 issues. This may have to be revisited when HP releases a C99 compiler
6095 or updates the ABI. */
6097 static bool
6098 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6099 enum machine_mode mode, const_tree type,
6100 bool named ATTRIBUTE_UNUSED)
6102 HOST_WIDE_INT size;
6104 if (type)
6105 size = int_size_in_bytes (type);
6106 else
6107 size = GET_MODE_SIZE (mode);
6109 if (TARGET_64BIT)
6110 return size <= 0;
6111 else
6112 return size <= 0 || size > 8;
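/* For illustration only -- the decision above as a standalone
   predicate (not part of pa.c), with a few sample sizes.  A negative
   size models int_size_in_bytes returning -1 for variable-sized
   types.  */
#include <stdbool.h>
#include <stdio.h>

static bool
passed_by_reference_p (long size, bool target_64bit)
{
  if (target_64bit)
    return size <= 0;
  return size <= 0 || size > 8;
}

int
main (void)
{
  printf ("%d\n", passed_by_reference_p (4, false));	/* 0: in registers */
  printf ("%d\n", passed_by_reference_p (16, false));	/* 1: by reference */
  printf ("%d\n", passed_by_reference_p (16, true));	/* 0: 64-bit runtime */
  return 0;
}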
6115 enum direction
6116 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6118 if (mode == BLKmode
6119 || (TARGET_64BIT
6120 && type
6121 && (AGGREGATE_TYPE_P (type)
6122 || TREE_CODE (type) == COMPLEX_TYPE
6123 || TREE_CODE (type) == VECTOR_TYPE)))
6125 /* Return none if justification is not required. */
6126 if (type
6127 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6128 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6129 return none;
6131 /* The directions set here are ignored when a BLKmode argument larger
6132 than a word is placed in a register. Different code is used for
6133 the stack and registers. This makes it difficult to have a
6134 consistent data representation for both the stack and registers.
6135 For both runtimes, the justification and padding for arguments on
6136 the stack and in registers should be identical. */
6137 if (TARGET_64BIT)
6138 /* The 64-bit runtime specifies left justification for aggregates. */
6139 return upward;
6140 else
6141 /* The 32-bit runtime architecture specifies right justification.
6142 When the argument is passed on the stack, the argument is padded
6143 with garbage on the left. The HP compiler pads with zeros. */
6144 return downward;
6147 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6148 return downward;
6149 else
6150 return none;
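/* For illustration only -- a reduced model (not part of pa.c) of the
   BLKmode branch above for the 32-bit runtime, assuming PARM_BOUNDARY
   is BITS_PER_WORD (32 bits).  A 3-byte struct is padded downward
   (right justified); a 4-byte struct needs no padding at all.  */
enum pad_direction { PAD_NONE, PAD_UPWARD, PAD_DOWNWARD };

static enum pad_direction
blk_arg_padding_32 (unsigned size_in_bytes)
{
  /* No justification needed when the size fills whole parameter words.  */
  if (size_in_bytes * 8 % 32 == 0)
    return PAD_NONE;
  /* The 32-bit runtime specifies right justification.  */
  return PAD_DOWNWARD;
}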
6154 /* Do what is necessary for `va_start'. We look at the current function
6155 to determine if stdargs or varargs is used and fill in an initial
6156 va_list. A pointer to this constructor is returned. */
6158 static rtx
6159 hppa_builtin_saveregs (void)
6161 rtx offset, dest;
6162 tree fntype = TREE_TYPE (current_function_decl);
6163 int argadj = ((!stdarg_p (fntype))
6164 ? UNITS_PER_WORD : 0);
6166 if (argadj)
6167 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6168 else
6169 offset = crtl->args.arg_offset_rtx;
6171 if (TARGET_64BIT)
6173 int i, off;
6175 /* Adjust for varargs/stdarg differences. */
6176 if (argadj)
6177 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6178 else
6179 offset = crtl->args.arg_offset_rtx;
6181 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6182 from the incoming arg pointer and growing to larger addresses. */
6183 for (i = 26, off = -64; i >= 19; i--, off += 8)
6184 emit_move_insn (gen_rtx_MEM (word_mode,
6185 plus_constant (Pmode,
6186 arg_pointer_rtx, off)),
6187 gen_rtx_REG (word_mode, i));
6189 /* The incoming args pointer points just beyond the flushback area;
6190 normally this is not a serious concern. However, when we are doing
6191 varargs/stdargs we want to make the arg pointer point to the start
6192 of the incoming argument area. */
6193 emit_move_insn (virtual_incoming_args_rtx,
6194 plus_constant (Pmode, arg_pointer_rtx, -64));
6196 /* Now return a pointer to the first anonymous argument. */
6197 return copy_to_reg (expand_binop (Pmode, add_optab,
6198 virtual_incoming_args_rtx,
6199 offset, 0, 0, OPTAB_LIB_WIDEN));
6202 /* Store general registers on the stack. */
6203 dest = gen_rtx_MEM (BLKmode,
6204 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6205 -16));
6206 set_mem_alias_set (dest, get_varargs_alias_set ());
6207 set_mem_align (dest, BITS_PER_WORD);
6208 move_block_from_reg (23, dest, 4);
6210 /* move_block_from_reg will emit code to store the argument registers
6211 individually as scalar stores.
6213 However, other insns may later load from the same addresses for
6214 a structure load (passing a struct to a varargs routine).
6216 The alias code assumes that such aliasing can never happen, so we
6217 have to keep memory referencing insns from moving up beyond the
6218 last argument register store. So we emit a blockage insn here. */
6219 emit_insn (gen_blockage ());
6221 return copy_to_reg (expand_binop (Pmode, add_optab,
6222 crtl->args.internal_arg_pointer,
6223 offset, 0, 0, OPTAB_LIB_WIDEN));
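/* For illustration only -- a standalone printout (not part of pa.c)
   of the spill slots used by the 64-bit loop above: %r26 lands at
   -64 from the incoming arg pointer and each following register
   eight bytes higher, ending with %r19 at -8.  */
#include <stdio.h>

int
main (void)
{
  int i, off;

  for (i = 26, off = -64; i >= 19; i--, off += 8)
    printf ("%%r%d -> %d(arg pointer)\n", i, off);
  return 0;
}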
6226 static void
6227 hppa_va_start (tree valist, rtx nextarg)
6229 nextarg = expand_builtin_saveregs ();
6230 std_expand_builtin_va_start (valist, nextarg);
6233 static tree
6234 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6235 gimple_seq *post_p)
6237 if (TARGET_64BIT)
6239 /* Args grow upward. We can use the generic routines. */
6240 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6242 else /* !TARGET_64BIT */
6244 tree ptr = build_pointer_type (type);
6245 tree valist_type;
6246 tree t, u;
6247 unsigned int size, ofs;
6248 bool indirect;
6250 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6251 if (indirect)
6253 type = ptr;
6254 ptr = build_pointer_type (type);
6256 size = int_size_in_bytes (type);
6257 valist_type = TREE_TYPE (valist);
6259 /* Args grow down. Not handled by generic routines. */
6261 u = fold_convert (sizetype, size_in_bytes (type));
6262 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6263 t = fold_build_pointer_plus (valist, u);
6265 /* Align to 4 or 8 byte boundary depending on argument size. */
6267 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6268 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6269 t = fold_convert (valist_type, t);
6271 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6273 ofs = (8 - size) % 4;
6274 if (ofs != 0)
6275 t = fold_build_pointer_plus_hwi (t, ofs);
6277 t = fold_convert (ptr, t);
6278 t = build_va_arg_indirect_ref (t);
6280 if (indirect)
6281 t = build_va_arg_indirect_ref (t);
6283 return t;
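/* For illustration only -- the 32-bit va_arg address computation
   above, redone (not part of pa.c) on a plain char * instead of
   trees.  Arguments grow downward, the slot is aligned to 4 or 8
   bytes depending on size, and small arguments sit right justified
   within their word via ofs = (8 - size) % 4.  */
#include <stdint.h>

static void *
next_vararg_32 (char **valist, unsigned size)
{
  uintptr_t p = (uintptr_t) *valist - size;

  /* Align to a 4- or 8-byte boundary depending on argument size.  */
  p &= size > 4 ? (uintptr_t) -8 : (uintptr_t) -4;
  *valist = (char *) p;

  /* Right justify small arguments inside their word.  */
  return (char *) p + (8 - size) % 4;
}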
6287 /* True if MODE is valid for the target. By "valid", we mean able to
6288 be manipulated in non-trivial ways. In particular, this means all
6289 the arithmetic is supported.
6291 Currently, TImode is not valid as the HP 64-bit runtime documentation
6292 doesn't document the alignment and calling conventions for this type.
6293 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6294 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6296 static bool
6297 pa_scalar_mode_supported_p (enum machine_mode mode)
6299 int precision = GET_MODE_PRECISION (mode);
6301 switch (GET_MODE_CLASS (mode))
6303 case MODE_PARTIAL_INT:
6304 case MODE_INT:
6305 if (precision == CHAR_TYPE_SIZE)
6306 return true;
6307 if (precision == SHORT_TYPE_SIZE)
6308 return true;
6309 if (precision == INT_TYPE_SIZE)
6310 return true;
6311 if (precision == LONG_TYPE_SIZE)
6312 return true;
6313 if (precision == LONG_LONG_TYPE_SIZE)
6314 return true;
6315 return false;
6317 case MODE_FLOAT:
6318 if (precision == FLOAT_TYPE_SIZE)
6319 return true;
6320 if (precision == DOUBLE_TYPE_SIZE)
6321 return true;
6322 if (precision == LONG_DOUBLE_TYPE_SIZE)
6323 return true;
6324 return false;
6326 case MODE_DECIMAL_FLOAT:
6327 return false;
6329 default:
6330 gcc_unreachable ();
6334 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6335 it branches into the delay slot. Otherwise, return FALSE. */
6337 static bool
6338 branch_to_delay_slot_p (rtx insn)
6340 rtx jump_insn;
6342 if (dbr_sequence_length ())
6343 return FALSE;
6345 jump_insn = next_active_insn (JUMP_LABEL (insn));
6346 while (insn)
6348 insn = next_active_insn (insn);
6349 if (jump_insn == insn)
6350 return TRUE;
6352 /* We can't rely on the length of asms. So, we return FALSE when
6353 the branch is followed by an asm. */
6354 if (!insn
6355 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6356 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6357 || get_attr_length (insn) > 0)
6358 break;
6361 return FALSE;
6364 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6366 This occurs when INSN has an unfilled delay slot and is followed
6367 by an asm. Disaster can occur if the asm is empty and the jump
6368 branches into the delay slot. So, we add a nop in the delay slot
6369 when this occurs. */
6371 static bool
6372 branch_needs_nop_p (rtx insn)
6374 rtx jump_insn;
6376 if (dbr_sequence_length ())
6377 return FALSE;
6379 jump_insn = next_active_insn (JUMP_LABEL (insn));
6380 while (insn)
6382 insn = next_active_insn (insn);
6383 if (!insn || jump_insn == insn)
6384 return TRUE;
6386 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6387 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6388 && get_attr_length (insn) > 0)
6389 break;
6392 return FALSE;
6395 /* Return TRUE if INSN, a forward jump insn, can use nullification
6396 to skip the following instruction. This avoids an extra cycle due
6397 to a mis-predicted branch when we fall through. */
6399 static bool
6400 use_skip_p (rtx insn)
6402 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6404 while (insn)
6406 insn = next_active_insn (insn);
6408 /* We can't rely on the length of asms, so we can't skip asms. */
6409 if (!insn
6410 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6411 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6412 break;
6413 if (get_attr_length (insn) == 4
6414 && jump_insn == next_active_insn (insn))
6415 return TRUE;
6416 if (get_attr_length (insn) > 0)
6417 break;
6420 return FALSE;
6423 /* This routine handles all the normal conditional branch sequences we
6424 might need to generate. It handles compare immediate vs compare
6425 register, nullification of delay slots, varying length branches,
6426 negated branches, and all combinations of the above. It returns the
6427 output appropriate to emit the branch corresponding to all given
6428 parameters. */
6430 const char *
6431 pa_output_cbranch (rtx *operands, int negated, rtx insn)
6433 static char buf[100];
6434 bool useskip;
6435 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6436 int length = get_attr_length (insn);
6437 int xdelay;
6439 /* A conditional branch to the following instruction (e.g. the delay slot)
6440 is asking for a disaster. This can happen when not optimizing and
6441 when jump optimization fails.
6443 While it is usually safe to emit nothing, this can fail if the
6444 preceding instruction is a nullified branch with an empty delay
6445 slot and the same branch target as this branch. We could check
6446 for this but jump optimization should eliminate nop jumps. It
6447 is always safe to emit a nop. */
6448 if (branch_to_delay_slot_p (insn))
6449 return "nop";
6451 /* The doubleword form of the cmpib instruction doesn't have the LEU
6452 and GTU conditions while the cmpb instruction does. Since we accept
6453 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6454 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6455 operands[2] = gen_rtx_REG (DImode, 0);
6456 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6457 operands[1] = gen_rtx_REG (DImode, 0);
6459 /* If this is a long branch with its delay slot unfilled, set `nullify'
6460 as it can nullify the delay slot and save a nop. */
6461 if (length == 8 && dbr_sequence_length () == 0)
6462 nullify = 1;
6464 /* If this is a short forward conditional branch which did not get
6465 its delay slot filled, the delay slot can still be nullified. */
6466 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6467 nullify = forward_branch_p (insn);
6469 /* A forward branch over a single nullified insn can be done with a
6470 comclr instruction. This avoids a single cycle penalty due to
6471 mis-predicted branch if we fall through (branch not taken). */
6472 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6474 switch (length)
6476 /* All short conditional branches except backwards with an unfilled
6477 delay slot. */
6478 case 4:
6479 if (useskip)
6480 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6481 else
6482 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6483 if (GET_MODE (operands[1]) == DImode)
6484 strcat (buf, "*");
6485 if (negated)
6486 strcat (buf, "%B3");
6487 else
6488 strcat (buf, "%S3");
6489 if (useskip)
6490 strcat (buf, " %2,%r1,%%r0");
6491 else if (nullify)
6493 if (branch_needs_nop_p (insn))
6494 strcat (buf, ",n %2,%r1,%0%#");
6495 else
6496 strcat (buf, ",n %2,%r1,%0");
6498 else
6499 strcat (buf, " %2,%r1,%0");
6500 break;
6502 /* All long conditionals. Note a short backward branch with an
6503 unfilled delay slot is treated just like a long backward branch
6504 with an unfilled delay slot. */
6505 case 8:
6506 /* Handle weird backwards branch with a filled delay slot
6507 which is nullified. */
6508 if (dbr_sequence_length () != 0
6509 && ! forward_branch_p (insn)
6510 && nullify)
6512 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6513 if (GET_MODE (operands[1]) == DImode)
6514 strcat (buf, "*");
6515 if (negated)
6516 strcat (buf, "%S3");
6517 else
6518 strcat (buf, "%B3");
6519 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6521 /* Handle short backwards branch with an unfilled delay slot.
6522 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6523 taken and untaken branches. */
6524 else if (dbr_sequence_length () == 0
6525 && ! forward_branch_p (insn)
6526 && INSN_ADDRESSES_SET_P ()
6527 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6528 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6530 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6531 if (GET_MODE (operands[1]) == DImode)
6532 strcat (buf, "*");
6533 if (negated)
6534 strcat (buf, "%B3 %2,%r1,%0%#");
6535 else
6536 strcat (buf, "%S3 %2,%r1,%0%#");
6538 else
6540 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6541 if (GET_MODE (operands[1]) == DImode)
6542 strcat (buf, "*");
6543 if (negated)
6544 strcat (buf, "%S3");
6545 else
6546 strcat (buf, "%B3");
6547 if (nullify)
6548 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6549 else
6550 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6552 break;
6554 default:
6555 /* The reversed conditional branch must branch over one additional
6556 instruction if the delay slot is filled and needs to be extracted
6557 by pa_output_lbranch. If the delay slot is empty or this is a
6558 nullified forward branch, the instruction after the reversed
6559 condition branch must be nullified. */
6560 if (dbr_sequence_length () == 0
6561 || (nullify && forward_branch_p (insn)))
6563 nullify = 1;
6564 xdelay = 0;
6565 operands[4] = GEN_INT (length);
6567 else
6569 xdelay = 1;
6570 operands[4] = GEN_INT (length + 4);
6573 /* Create a reversed conditional branch which branches around
6574 the following insns. */
6575 if (GET_MODE (operands[1]) != DImode)
6577 if (nullify)
6579 if (negated)
6580 strcpy (buf,
6581 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6582 else
6583 strcpy (buf,
6584 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6586 else
6588 if (negated)
6589 strcpy (buf,
6590 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6591 else
6592 strcpy (buf,
6593 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6596 else
6598 if (nullify)
6600 if (negated)
6601 strcpy (buf,
6602 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6603 else
6604 strcpy (buf,
6605 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6607 else
6609 if (negated)
6610 strcpy (buf,
6611 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6612 else
6613 strcpy (buf,
6614 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6618 output_asm_insn (buf, operands);
6619 return pa_output_lbranch (operands[0], insn, xdelay);
6621 return buf;
6624 /* This routine handles output of long unconditional branches that
6625 exceed the maximum range of a simple branch instruction. Since
6626 we don't have a register available for the branch, we save register
6627 %r1 in the frame marker, load the branch destination DEST into %r1,
6628 execute the branch, and restore %r1 in the delay slot of the branch.
6630 Since long branches may have an insn in the delay slot and the
6631 delay slot is used to restore %r1, we in general need to extract
6632 this insn and execute it before the branch. However, to facilitate
6633 use of this function by conditional branches, we also provide an
6634 option to not extract the delay insn so that it will be emitted
6635 after the long branch. So, if there is an insn in the delay slot,
6636 it is extracted if XDELAY is nonzero.
6638 The lengths of the various long-branch sequences are 20, 16 and 24
6639 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6641 const char *
6642 pa_output_lbranch (rtx dest, rtx insn, int xdelay)
6644 rtx xoperands[2];
6646 xoperands[0] = dest;
6648 /* First, free up the delay slot. */
6649 if (xdelay && dbr_sequence_length () != 0)
6651 /* We can't handle a jump in the delay slot. */
6652 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6654 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6655 optimize, 0, NULL);
6657 /* Now delete the delay insn. */
6658 SET_INSN_DELETED (NEXT_INSN (insn));
6661 /* Output an insn to save %r1. The runtime documentation doesn't
6662 specify whether the "Clean Up" slot in the callers frame can
6663 be clobbered by the callee. It isn't copied by HP's builtin
6664 alloca, so this suggests that it can be clobbered if necessary.
6665 The "Static Link" location is copied by HP builtin alloca, so
6666 we avoid using it. Using the cleanup slot might be a problem
6667 if we have to interoperate with languages that pass cleanup
6668 information. However, it should be possible to handle these
6669 situations with GCC's asm feature.
6671 The "Current RP" slot is reserved for the called procedure, so
6672 we try to use it when we don't have a frame of our own. It's
6673 rather unlikely that we won't have a frame when we need to emit
6674 a very long branch.
6676 Really the way to go long term is a register scavenger; goto
6677 the target of the jump and find a register which we can use
6678 as a scratch to hold the value in %r1. Then, we wouldn't have
6679 to free up the delay slot or clobber a slot that may be needed
6680 for other purposes. */
6681 if (TARGET_64BIT)
6683 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6684 /* Use the return pointer slot in the frame marker. */
6685 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6686 else
6687 /* Use the slot at -40 in the frame marker since HP builtin
6688 alloca doesn't copy it. */
6689 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6691 else
6693 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6694 /* Use the return pointer slot in the frame marker. */
6695 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6696 else
6697 /* Use the "Clean Up" slot in the frame marker. In GCC,
6698 the only other use of this location is for copying a
6699 floating point double argument from a floating-point
6700 register to two general registers. The copy is done
6701 as an "atomic" operation when outputting a call, so it
6702 won't interfere with our using the location here. */
6703 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6706 if (TARGET_PORTABLE_RUNTIME)
6708 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6709 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6710 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6712 else if (flag_pic)
6714 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6715 if (TARGET_SOM || !TARGET_GAS)
6717 xoperands[1] = gen_label_rtx ();
6718 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6719 targetm.asm_out.internal_label (asm_out_file, "L",
6720 CODE_LABEL_NUMBER (xoperands[1]));
6721 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6723 else
6725 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6726 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6728 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6730 else
6731 /* Now output a very long branch to the original target. */
6732 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6734 /* Now restore the value of %r1 in the delay slot. */
6735 if (TARGET_64BIT)
6737 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6738 return "ldd -16(%%r30),%%r1";
6739 else
6740 return "ldd -40(%%r30),%%r1";
6742 else
6744 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6745 return "ldw -20(%%r30),%%r1";
6746 else
6747 return "ldw -12(%%r30),%%r1";
6751 /* This routine handles all the branch-on-bit conditional branch sequences we
6752 might need to generate. It handles nullification of delay slots,
6753 varying length branches, negated branches and all combinations of the
6754 above. It returns the appropriate output template to emit the branch. */
6756 const char *
6757 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6759 static char buf[100];
6760 bool useskip;
6761 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6762 int length = get_attr_length (insn);
6763 int xdelay;
6765 /* A conditional branch to the following instruction (e.g. the delay slot) is
6766 asking for a disaster. I do not think this can happen as this pattern
6767 is only used when optimizing; jump optimization should eliminate the
6768 jump. But be prepared just in case. */
6770 if (branch_to_delay_slot_p (insn))
6771 return "nop";
6773 /* If this is a long branch with its delay slot unfilled, set `nullify'
6774 as it can nullify the delay slot and save a nop. */
6775 if (length == 8 && dbr_sequence_length () == 0)
6776 nullify = 1;
6778 /* If this is a short forward conditional branch which did not get
6779 its delay slot filled, the delay slot can still be nullified. */
6780 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6781 nullify = forward_branch_p (insn);
6783 /* A forward branch over a single nullified insn can be done with an
6784 extrs instruction. This avoids a single cycle penalty due to
6785 mis-predicted branch if we fall through (branch not taken). */
6786 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6788 switch (length)
6791 /* All short conditional branches except backwards with an unfilled
6792 delay slot. */
6793 case 4:
6794 if (useskip)
6795 strcpy (buf, "{extrs,|extrw,s,}");
6796 else
6797 strcpy (buf, "bb,");
6798 if (useskip && GET_MODE (operands[0]) == DImode)
6799 strcpy (buf, "extrd,s,*");
6800 else if (GET_MODE (operands[0]) == DImode)
6801 strcpy (buf, "bb,*");
6802 if ((which == 0 && negated)
6803 || (which == 1 && ! negated))
6804 strcat (buf, ">=");
6805 else
6806 strcat (buf, "<");
6807 if (useskip)
6808 strcat (buf, " %0,%1,1,%%r0");
6809 else if (nullify && negated)
6811 if (branch_needs_nop_p (insn))
6812 strcat (buf, ",n %0,%1,%3%#");
6813 else
6814 strcat (buf, ",n %0,%1,%3");
6816 else if (nullify && ! negated)
6818 if (branch_needs_nop_p (insn))
6819 strcat (buf, ",n %0,%1,%2%#");
6820 else
6821 strcat (buf, ",n %0,%1,%2");
6823 else if (! nullify && negated)
6824 strcat (buf, " %0,%1,%3");
6825 else if (! nullify && ! negated)
6826 strcat (buf, " %0,%1,%2");
6827 break;
6829 /* All long conditionals. Note a short backward branch with an
6830 unfilled delay slot is treated just like a long backward branch
6831 with an unfilled delay slot. */
6832 case 8:
6833 /* Handle weird backwards branch with a filled delay slot
6834 which is nullified. */
6835 if (dbr_sequence_length () != 0
6836 && ! forward_branch_p (insn)
6837 && nullify)
6839 strcpy (buf, "bb,");
6840 if (GET_MODE (operands[0]) == DImode)
6841 strcat (buf, "*");
6842 if ((which == 0 && negated)
6843 || (which == 1 && ! negated))
6844 strcat (buf, "<");
6845 else
6846 strcat (buf, ">=");
6847 if (negated)
6848 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6849 else
6850 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6852 /* Handle short backwards branch with an unfilled delay slot.
6853 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6854 taken and untaken branches. */
6855 else if (dbr_sequence_length () == 0
6856 && ! forward_branch_p (insn)
6857 && INSN_ADDRESSES_SET_P ()
6858 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6859 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6861 strcpy (buf, "bb,");
6862 if (GET_MODE (operands[0]) == DImode)
6863 strcat (buf, "*");
6864 if ((which == 0 && negated)
6865 || (which == 1 && ! negated))
6866 strcat (buf, ">=");
6867 else
6868 strcat (buf, "<");
6869 if (negated)
6870 strcat (buf, " %0,%1,%3%#");
6871 else
6872 strcat (buf, " %0,%1,%2%#");
6874 else
6876 if (GET_MODE (operands[0]) == DImode)
6877 strcpy (buf, "extrd,s,*");
6878 else
6879 strcpy (buf, "{extrs,|extrw,s,}");
6880 if ((which == 0 && negated)
6881 || (which == 1 && ! negated))
6882 strcat (buf, "<");
6883 else
6884 strcat (buf, ">=");
6885 if (nullify && negated)
6886 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6887 else if (nullify && ! negated)
6888 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6889 else if (negated)
6890 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6891 else
6892 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6894 break;
6896 default:
6897 /* The reversed conditional branch must branch over one additional
6898 instruction if the delay slot is filled and needs to be extracted
6899 by pa_output_lbranch. If the delay slot is empty or this is a
6900 nullified forward branch, the instruction after the reversed
6901 condition branch must be nullified. */
6902 if (dbr_sequence_length () == 0
6903 || (nullify && forward_branch_p (insn)))
6905 nullify = 1;
6906 xdelay = 0;
6907 operands[4] = GEN_INT (length);
6909 else
6911 xdelay = 1;
6912 operands[4] = GEN_INT (length + 4);
6915 if (GET_MODE (operands[0]) == DImode)
6916 strcpy (buf, "bb,*");
6917 else
6918 strcpy (buf, "bb,");
6919 if ((which == 0 && negated)
6920 || (which == 1 && !negated))
6921 strcat (buf, "<");
6922 else
6923 strcat (buf, ">=");
6924 if (nullify)
6925 strcat (buf, ",n %0,%1,.+%4");
6926 else
6927 strcat (buf, " %0,%1,.+%4");
6928 output_asm_insn (buf, operands);
6929 return pa_output_lbranch (negated ? operands[3] : operands[2],
6930 insn, xdelay);
6932 return buf;
6935 /* This routine handles all the branch-on-variable-bit conditional branch
6936 sequences we might need to generate. It handles nullification of delay
6937 slots, varying length branches, negated branches and all combinations
6938 of the above. It returns the appropriate output template to emit the
6939 branch. */
6941 const char *
6942 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
6943 int which)
6945 static char buf[100];
6946 bool useskip;
6947 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6948 int length = get_attr_length (insn);
6949 int xdelay;
6951 /* A conditional branch to the following instruction (e.g. the delay slot) is
6952 asking for a disaster. I do not think this can happen as this pattern
6953 is only used when optimizing; jump optimization should eliminate the
6954 jump. But be prepared just in case. */
6956 if (branch_to_delay_slot_p (insn))
6957 return "nop";
6959 /* If this is a long branch with its delay slot unfilled, set `nullify'
6960 as it can nullify the delay slot and save a nop. */
6961 if (length == 8 && dbr_sequence_length () == 0)
6962 nullify = 1;
6964 /* If this is a short forward conditional branch which did not get
6965 its delay slot filled, the delay slot can still be nullified. */
6966 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6967 nullify = forward_branch_p (insn);
6969 /* A forward branch over a single nullified insn can be done with an
6970 extrs instruction. This avoids a single cycle penalty due to
6971 mis-predicted branch if we fall through (branch not taken). */
6972 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6974 switch (length)
6977 /* All short conditional branches except backwards with an unfilled
6978 delay slot. */
6979 case 4:
6980 if (useskip)
6981 strcpy (buf, "{vextrs,|extrw,s,}");
6982 else
6983 strcpy (buf, "{bvb,|bb,}");
6984 if (useskip && GET_MODE (operands[0]) == DImode)
6985 strcpy (buf, "extrd,s,*");
6986 else if (GET_MODE (operands[0]) == DImode)
6987 strcpy (buf, "bb,*");
6988 if ((which == 0 && negated)
6989 || (which == 1 && ! negated))
6990 strcat (buf, ">=");
6991 else
6992 strcat (buf, "<");
6993 if (useskip)
6994 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6995 else if (nullify && negated)
6997 if (branch_needs_nop_p (insn))
6998 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6999 else
7000 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7002 else if (nullify && ! negated)
7004 if (branch_needs_nop_p (insn))
7005 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7006 else
7007 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7009 else if (! nullify && negated)
7010 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7011 else if (! nullify && ! negated)
7012 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7013 break;
7015 /* All long conditionals. Note a short backward branch with an
7016 unfilled delay slot is treated just like a long backward branch
7017 with an unfilled delay slot. */
7018 case 8:
7019 /* Handle weird backwards branch with a filled delay slot
7020 which is nullified. */
7021 if (dbr_sequence_length () != 0
7022 && ! forward_branch_p (insn)
7023 && nullify)
7025 strcpy (buf, "{bvb,|bb,}");
7026 if (GET_MODE (operands[0]) == DImode)
7027 strcat (buf, "*");
7028 if ((which == 0 && negated)
7029 || (which == 1 && ! negated))
7030 strcat (buf, "<");
7031 else
7032 strcat (buf, ">=");
7033 if (negated)
7034 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7035 else
7036 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7038 /* Handle short backwards branch with an unfilled delay slot.
7039 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7040 taken and untaken branches. */
7041 else if (dbr_sequence_length () == 0
7042 && ! forward_branch_p (insn)
7043 && INSN_ADDRESSES_SET_P ()
7044 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7045 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7047 strcpy (buf, "{bvb,|bb,}");
7048 if (GET_MODE (operands[0]) == DImode)
7049 strcat (buf, "*");
7050 if ((which == 0 && negated)
7051 || (which == 1 && ! negated))
7052 strcat (buf, ">=");
7053 else
7054 strcat (buf, "<");
7055 if (negated)
7056 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7057 else
7058 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7060 else
7062 strcpy (buf, "{vextrs,|extrw,s,}");
7063 if (GET_MODE (operands[0]) == DImode)
7064 strcpy (buf, "extrd,s,*");
7065 if ((which == 0 && negated)
7066 || (which == 1 && ! negated))
7067 strcat (buf, "<");
7068 else
7069 strcat (buf, ">=");
7070 if (nullify && negated)
7071 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7072 else if (nullify && ! negated)
7073 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7074 else if (negated)
7075 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7076 else
7077 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7079 break;
7081 default:
7082 /* The reversed conditional branch must branch over one additional
7083 instruction if the delay slot is filled and needs to be extracted
7084 by pa_output_lbranch. If the delay slot is empty or this is a
7085 nullified forward branch, the instruction after the reversed
7086 condition branch must be nullified. */
7087 if (dbr_sequence_length () == 0
7088 || (nullify && forward_branch_p (insn)))
7090 nullify = 1;
7091 xdelay = 0;
7092 operands[4] = GEN_INT (length);
7094 else
7096 xdelay = 1;
7097 operands[4] = GEN_INT (length + 4);
7100 if (GET_MODE (operands[0]) == DImode)
7101 strcpy (buf, "bb,*");
7102 else
7103 strcpy (buf, "{bvb,|bb,}");
7104 if ((which == 0 && negated)
7105 || (which == 1 && !negated))
7106 strcat (buf, "<");
7107 else
7108 strcat (buf, ">=");
7109 if (nullify)
7110 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7111 else
7112 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7113 output_asm_insn (buf, operands);
7114 return pa_output_lbranch (negated ? operands[3] : operands[2],
7115 insn, xdelay);
7117 return buf;
7120 /* Return the output template for emitting a dbra type insn.
7122 Note it may perform some output operations on its own before
7123 returning the final output string. */
7124 const char *
7125 pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
7127 int length = get_attr_length (insn);
7129 /* A conditional branch to the following instruction (e.g. the delay slot) is
7130 asking for a disaster. Be prepared! */
7132 if (branch_to_delay_slot_p (insn))
7134 if (which_alternative == 0)
7135 return "ldo %1(%0),%0";
7136 else if (which_alternative == 1)
7138 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7139 output_asm_insn ("ldw -16(%%r30),%4", operands);
7140 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7141 return "{fldws|fldw} -16(%%r30),%0";
7143 else
7145 output_asm_insn ("ldw %0,%4", operands);
7146 return "ldo %1(%4),%4\n\tstw %4,%0";
7150 if (which_alternative == 0)
7152 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7153 int xdelay;
7155 /* If this is a long branch with its delay slot unfilled, set `nullify'
7156 as it can nullify the delay slot and save a nop. */
7157 if (length == 8 && dbr_sequence_length () == 0)
7158 nullify = 1;
7160 /* If this is a short forward conditional branch which did not get
7161 its delay slot filled, the delay slot can still be nullified. */
7162 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7163 nullify = forward_branch_p (insn);
7165 switch (length)
7167 case 4:
7168 if (nullify)
7170 if (branch_needs_nop_p (insn))
7171 return "addib,%C2,n %1,%0,%3%#";
7172 else
7173 return "addib,%C2,n %1,%0,%3";
7175 else
7176 return "addib,%C2 %1,%0,%3";
7178 case 8:
7179 /* Handle weird backwards branch with a filled delay slot
7180 which is nullified. */
7181 if (dbr_sequence_length () != 0
7182 && ! forward_branch_p (insn)
7183 && nullify)
7184 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7185 /* Handle short backwards branch with an unfilled delay slot.
7186 Using an addb;nop rather than addi;bl saves 1 cycle for both
7187 taken and untaken branches. */
7188 else if (dbr_sequence_length () == 0
7189 && ! forward_branch_p (insn)
7190 && INSN_ADDRESSES_SET_P ()
7191 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7192 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7193 return "addib,%C2 %1,%0,%3%#";
7195 /* Handle normal cases. */
7196 if (nullify)
7197 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7198 else
7199 return "addi,%N2 %1,%0,%0\n\tb %3";
7201 default:
7202 /* The reversed conditional branch must branch over one additional
7203 instruction if the delay slot is filled and needs to be extracted
7204 by pa_output_lbranch. If the delay slot is empty or this is a
7205 nullified forward branch, the instruction after the reversed
7206 condition branch must be nullified. */
7207 if (dbr_sequence_length () == 0
7208 || (nullify && forward_branch_p (insn)))
7210 nullify = 1;
7211 xdelay = 0;
7212 operands[4] = GEN_INT (length);
7214 else
7216 xdelay = 1;
7217 operands[4] = GEN_INT (length + 4);
7220 if (nullify)
7221 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7222 else
7223 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7225 return pa_output_lbranch (operands[3], insn, xdelay);
7229 /* Deal with gross reload from FP register case. */
7230 else if (which_alternative == 1)
7232 /* Move loop counter from FP register to MEM then into a GR,
7233 increment the GR, store the GR into MEM, and finally reload
7234 the FP register from MEM from within the branch's delay slot. */
7235 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7236 operands);
7237 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7238 if (length == 24)
7239 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7240 else if (length == 28)
7241 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7242 else
7244 operands[5] = GEN_INT (length - 16);
7245 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7246 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7247 return pa_output_lbranch (operands[3], insn, 0);
7250 /* Deal with gross reload from memory case. */
7251 else
7253 /* Reload loop counter from memory, the store back to memory
7254 happens in the branch's delay slot. */
7255 output_asm_insn ("ldw %0,%4", operands);
7256 if (length == 12)
7257 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7258 else if (length == 16)
7259 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7260 else
7262 operands[5] = GEN_INT (length - 4);
7263 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7264 return pa_output_lbranch (operands[3], insn, 0);
7269 /* Return the output template for emitting a movb type insn.
7271 Note it may perform some output operations on its own before
7272 returning the final output string. */
7273 const char *
7274 pa_output_movb (rtx *operands, rtx insn, int which_alternative,
7275 int reverse_comparison)
7277 int length = get_attr_length (insn);
7279 /* A conditional branch to the following instruction (e.g. the delay slot) is
7280 asking for a disaster. Be prepared! */
7282 if (branch_to_delay_slot_p (insn))
7284 if (which_alternative == 0)
7285 return "copy %1,%0";
7286 else if (which_alternative == 1)
7288 output_asm_insn ("stw %1,-16(%%r30)", operands);
7289 return "{fldws|fldw} -16(%%r30),%0";
7291 else if (which_alternative == 2)
7292 return "stw %1,%0";
7293 else
7294 return "mtsar %r1";
7297 /* Support the second variant. */
7298 if (reverse_comparison)
7299 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7301 if (which_alternative == 0)
7303 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7304 int xdelay;
7306 /* If this is a long branch with its delay slot unfilled, set `nullify'
7307 as it can nullify the delay slot and save a nop. */
7308 if (length == 8 && dbr_sequence_length () == 0)
7309 nullify = 1;
7311 /* If this is a short forward conditional branch which did not get
7312 its delay slot filled, the delay slot can still be nullified. */
7313 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7314 nullify = forward_branch_p (insn);
7316 switch (length)
7318 case 4:
7319 if (nullify)
7321 if (branch_needs_nop_p (insn))
7322 return "movb,%C2,n %1,%0,%3%#";
7323 else
7324 return "movb,%C2,n %1,%0,%3";
7326 else
7327 return "movb,%C2 %1,%0,%3";
7329 case 8:
7330 /* Handle weird backwards branch with a filled delay slot
7331 which is nullified. */
7332 if (dbr_sequence_length () != 0
7333 && ! forward_branch_p (insn)
7334 && nullify)
7335 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7337 /* Handle short backwards branch with an unfilled delay slot.
7338 Using a movb;nop rather than or;bl saves 1 cycle for both
7339 taken and untaken branches. */
7340 else if (dbr_sequence_length () == 0
7341 && ! forward_branch_p (insn)
7342 && INSN_ADDRESSES_SET_P ()
7343 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7344 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7345 return "movb,%C2 %1,%0,%3%#";
7346 /* Handle normal cases. */
7347 if (nullify)
7348 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7349 else
7350 return "or,%N2 %1,%%r0,%0\n\tb %3";
7352 default:
7353 /* The reversed conditional branch must branch over one additional
7354 instruction if the delay slot is filled and needs to be extracted
7355 by pa_output_lbranch. If the delay slot is empty or this is a
7356 nullified forward branch, the instruction after the reversed
7357 condition branch must be nullified. */
7358 if (dbr_sequence_length () == 0
7359 || (nullify && forward_branch_p (insn)))
7361 nullify = 1;
7362 xdelay = 0;
7363 operands[4] = GEN_INT (length);
7365 else
7367 xdelay = 1;
7368 operands[4] = GEN_INT (length + 4);
7371 if (nullify)
7372 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7373 else
7374 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7376 return pa_output_lbranch (operands[3], insn, xdelay);
7379 /* Deal with gross reload for FP destination register case. */
7380 else if (which_alternative == 1)
7382 /* Move source register to MEM, perform the branch test, then
7383 finally load the FP register from MEM from within the branch's
7384 delay slot. */
7385 output_asm_insn ("stw %1,-16(%%r30)", operands);
7386 if (length == 12)
7387 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7388 else if (length == 16)
7389 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7390 else
7392 operands[4] = GEN_INT (length - 4);
7393 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7394 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7395 return pa_output_lbranch (operands[3], insn, 0);
7398 /* Deal with gross reload from memory case. */
7399 else if (which_alternative == 2)
7401 /* Reload loop counter from memory, the store back to memory
7402 happens in the branch's delay slot. */
7403 if (length == 8)
7404 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7405 else if (length == 12)
7406 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7407 else
7409 operands[4] = GEN_INT (length);
7410 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7411 operands);
7412 return pa_output_lbranch (operands[3], insn, 0);
7415 /* Handle SAR as a destination. */
7416 else
7418 if (length == 8)
7419 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7420 else if (length == 12)
7421 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7422 else
7424 operands[4] = GEN_INT (length);
7425 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7426 operands);
7427 return pa_output_lbranch (operands[3], insn, 0);
7432 /* Copy any FP arguments in INSN into integer registers. */
7433 static void
7434 copy_fp_args (rtx insn)
7436 rtx link;
7437 rtx xoperands[2];
7439 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7441 int arg_mode, regno;
7442 rtx use = XEXP (link, 0);
7444 if (! (GET_CODE (use) == USE
7445 && GET_CODE (XEXP (use, 0)) == REG
7446 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7447 continue;
7449 arg_mode = GET_MODE (XEXP (use, 0));
7450 regno = REGNO (XEXP (use, 0));
7452 /* Is it a floating point register? */
7453 if (regno >= 32 && regno <= 39)
7455 /* Copy the FP register into an integer register via memory. */
7456 if (arg_mode == SFmode)
7458 xoperands[0] = XEXP (use, 0);
7459 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7460 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7461 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7463 else
7465 xoperands[0] = XEXP (use, 0);
7466 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7467 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7468 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7469 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7475 /* Compute length of the FP argument copy sequence for INSN. */
7476 static int
7477 length_fp_args (rtx insn)
7479 int length = 0;
7480 rtx link;
7482 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7484 int arg_mode, regno;
7485 rtx use = XEXP (link, 0);
7487 if (! (GET_CODE (use) == USE
7488 && GET_CODE (XEXP (use, 0)) == REG
7489 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7490 continue;
7492 arg_mode = GET_MODE (XEXP (use, 0));
7493 regno = REGNO (XEXP (use, 0));
7495 /* Is it a floating point register? */
7496 if (regno >= 32 && regno <= 39)
7498 if (arg_mode == SFmode)
7499 length += 8;
7500 else
7501 length += 12;
7505 return length;
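/* For illustration only -- why the counts above are 8 and 12 (not
   part of pa.c): copy_fp_args emits two instructions per SFmode
   argument (fstw + ldw) and three per DFmode argument (fstd + two
   ldw's), at four bytes each.  */
static int
fp_arg_copy_bytes (int is_double)
{
  return (is_double ? 3 : 2) * 4;
}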
7508 /* Return the attribute length for the millicode call instruction INSN.
7509 The length must match the code generated by pa_output_millicode_call.
7510 We include the delay slot in the returned length as it is better to
7511 overestimate the length than to underestimate it. */
7513 int
7514 pa_attr_length_millicode_call (rtx insn)
7516 unsigned long distance = -1;
7517 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7519 if (INSN_ADDRESSES_SET_P ())
7521 distance = (total + insn_current_reference_address (insn));
7522 if (distance < total)
7523 distance = -1;
7526 if (TARGET_64BIT)
7528 if (!TARGET_LONG_CALLS && distance < 7600000)
7529 return 8;
7531 return 20;
7533 else if (TARGET_PORTABLE_RUNTIME)
7534 return 24;
7535 else
7537 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7538 return 8;
7540 if (!flag_pic)
7541 return 12;
7543 return 24;
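/* For illustration only -- the unsigned overflow idiom above as a
   standalone helper (not part of pa.c).  When the sum wraps around,
   it compares below TOTAL and the distance is pinned to the maximum
   value so the long-call path is chosen.  */
static unsigned long
call_distance (unsigned long total, unsigned long addr)
{
  unsigned long distance = total + addr;

  if (distance < total)
    distance = (unsigned long) -1;	/* wrapped: force long call */
  return distance;
}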
7547 /* INSN is a function call. It may have an unconditional jump
7548 in its delay slot.
7550 CALL_DEST is the routine we are calling. */
7552 const char *
7553 pa_output_millicode_call (rtx insn, rtx call_dest)
7555 int attr_length = get_attr_length (insn);
7556 int seq_length = dbr_sequence_length ();
7557 int distance;
7558 rtx seq_insn;
7559 rtx xoperands[3];
7561 xoperands[0] = call_dest;
7562 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7564 /* Handle the common case where we are sure that the branch will
7565 reach the beginning of the $CODE$ subspace. The within-reach
7566 form of the $$sh_func_adrs call has a length of 28. Because it
7567 has an attribute type of sh_func_adrs, it never has a nonzero
7568 sequence length (i.e., the delay slot is never filled). */
7569 if (!TARGET_LONG_CALLS
7570 && (attr_length == 8
7571 || (attr_length == 28
7572 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7574 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7576 else
7578 if (TARGET_64BIT)
7580 /* It might seem that one insn could be saved by accessing
7581 the millicode function using the linkage table. However,
7582 this doesn't work in shared libraries and other dynamically
7583 loaded objects. Using a pc-relative sequence also avoids
7584 problems related to the implicit use of the gp register. */
7585 output_asm_insn ("b,l .+8,%%r1", xoperands);
7587 if (TARGET_GAS)
7589 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7590 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7592 else
7594 xoperands[1] = gen_label_rtx ();
7595 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7596 targetm.asm_out.internal_label (asm_out_file, "L",
7597 CODE_LABEL_NUMBER (xoperands[1]));
7598 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7601 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7603 else if (TARGET_PORTABLE_RUNTIME)
7605 /* Pure portable runtime doesn't allow be/ble; we also don't
7606 have PIC support in the assembler/linker, so this sequence
7607 is needed. */
7609 /* Get the address of our target into %r1. */
7610 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7611 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7613 /* Get our return address into %r31. */
7614 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7615 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7617 /* Jump to our target address in %r1. */
7618 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7620 else if (!flag_pic)
7622 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7623 if (TARGET_PA_20)
7624 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7625 else
7626 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7628 else
7630 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7631 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7633 if (TARGET_SOM || !TARGET_GAS)
7635 /* The HP assembler can generate relocations for the
7636 difference of two symbols. GAS can do this for a
7637 millicode symbol but not an arbitrary external
7638 symbol when generating SOM output. */
7639 xoperands[1] = gen_label_rtx ();
7640 targetm.asm_out.internal_label (asm_out_file, "L",
7641 CODE_LABEL_NUMBER (xoperands[1]));
7642 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7643 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7645 else
7647 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7648 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7649 xoperands);
7652 /* Jump to our target address in %r1. */
7653 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7657 if (seq_length == 0)
7658 output_asm_insn ("nop", xoperands);
7660 /* We are done if there isn't a jump in the delay slot. */
7661 if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
7662 return "";
7664 /* This call has an unconditional jump in its delay slot. */
7665 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7667 /* See if the return address can be adjusted. Use the containing
7668 sequence insn's address. */
7669 if (INSN_ADDRESSES_SET_P ())
7671 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7672 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7673 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7675 if (VAL_14_BITS_P (distance))
7677 xoperands[1] = gen_label_rtx ();
7678 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7679 targetm.asm_out.internal_label (asm_out_file, "L",
7680 CODE_LABEL_NUMBER (xoperands[1]));
7682 else
7683 /* ??? This branch may not reach its target. */
7684 output_asm_insn ("nop\n\tb,n %0", xoperands);
7686 else
7687 /* ??? This branch may not reach its target. */
7688 output_asm_insn ("nop\n\tb,n %0", xoperands);
7690 /* Delete the jump. */
7691 SET_INSN_DELETED (NEXT_INSN (insn));
7693 return "";
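/* An illustrative note (not from the original sources): the L'/R'
   selectors in the non-PIC sequence above split a 32-bit absolute
   address into its high 21 bits and low 11 bits, e.g.

	ldil L'$$mulI,%r1		; %r1 = high part of $$mulI
	ble R'$$mulI(%sr4,%r1)		; branch to %r1 + low part;
					; return address left in %r31

   so an arbitrary absolute address is reachable in two instructions.  */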
7696 /* Return the attribute length of the call instruction INSN. The SIBCALL
7697 flag indicates whether INSN is a regular call or a sibling call. The
7698 length returned must be longer than the code actually generated by
7699 pa_output_call. Since branch shortening is done before delay branch
7700 sequencing, there is no way to determine whether or not the delay
7701 slot will be filled during branch shortening. Even when the delay
7702 slot is filled, we may have to add a nop if the delay slot contains
7703 a branch that can't reach its target. Thus, we always have to include
7704 the delay slot in the length estimate. This used to be done in
7705 pa_adjust_insn_length but we do it here now as some sequences always
7706 fill the delay slot and we can save four bytes in the estimate for
7707 these sequences. */
7710 pa_attr_length_call (rtx insn, int sibcall)
7712 int local_call;
7713 rtx call, call_dest;
7714 tree call_decl;
7715 int length = 0;
7716 rtx pat = PATTERN (insn);
7717 unsigned long distance = -1;
7719 gcc_assert (CALL_P (insn));
7721 if (INSN_ADDRESSES_SET_P ())
7723 unsigned long total;
7725 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7726 distance = (total + insn_current_reference_address (insn));
7727 if (distance < total)
7728 distance = -1;
7731 gcc_assert (GET_CODE (pat) == PARALLEL);
7733 /* Get the call rtx. */
7734 call = XVECEXP (pat, 0, 0);
7735 if (GET_CODE (call) == SET)
7736 call = SET_SRC (call);
7738 gcc_assert (GET_CODE (call) == CALL);
7740 /* Determine if this is a local call. */
7741 call_dest = XEXP (XEXP (call, 0), 0);
7742 call_decl = SYMBOL_REF_DECL (call_dest);
7743 local_call = call_decl && targetm.binds_local_p (call_decl);
7745 /* pc-relative branch. */
7746 if (!TARGET_LONG_CALLS
7747 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7748 || distance < MAX_PCREL17F_OFFSET))
7749 length += 8;
7751 /* 64-bit plabel sequence. */
7752 else if (TARGET_64BIT && !local_call)
7753 length += sibcall ? 28 : 24;
7755 /* non-pic long absolute branch sequence. */
7756 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7757 length += 12;
7759 /* long pc-relative branch sequence. */
7760 else if (TARGET_LONG_PIC_SDIFF_CALL
7761 || (TARGET_GAS && !TARGET_SOM
7762 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7764 length += 20;
7766 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7767 length += 8;
7770 /* 32-bit plabel sequence. */
7771 else
7773 length += 32;
7775 if (TARGET_SOM)
7776 length += length_fp_args (insn);
7778 if (flag_pic)
7779 length += 4;
7781 if (!TARGET_PA_20)
7783 if (!sibcall)
7784 length += 8;
7786 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7787 length += 8;
7791 return length;
7794 /* INSN is a function call. It may have an unconditional jump
7795 in its delay slot.
7797 CALL_DEST is the routine we are calling. */
7799 const char *
7800 pa_output_call (rtx insn, rtx call_dest, int sibcall)
7802 int delay_insn_deleted = 0;
7803 int delay_slot_filled = 0;
7804 int seq_length = dbr_sequence_length ();
7805 tree call_decl = SYMBOL_REF_DECL (call_dest);
7806 int local_call = call_decl && targetm.binds_local_p (call_decl);
7807 rtx xoperands[2];
7809 xoperands[0] = call_dest;
7811 /* Handle the common case where we're sure that the branch will reach
7812 the beginning of the "$CODE$" subspace. This is the beginning of
7813 the current function if we are in a named section. */
7814 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7816 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7817 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7819 else
7821 if (TARGET_64BIT && !local_call)
7823 /* ??? As far as I can tell, the HP linker doesn't support the
7824 long pc-relative sequence described in the 64-bit runtime
7825 architecture. So, we use a slightly longer indirect call. */
7826 xoperands[0] = pa_get_deferred_plabel (call_dest);
7827 xoperands[1] = gen_label_rtx ();
7829 /* If this isn't a sibcall, we put the load of %r27 into the
7830 delay slot. We can't do this in a sibcall as we don't
7831 have a second call-clobbered scratch register available. */
7832 if (seq_length != 0
7833 && ! JUMP_P (NEXT_INSN (insn))
7834 && !sibcall)
7836 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7837 optimize, 0, NULL);
7839 /* Now delete the delay insn. */
7840 SET_INSN_DELETED (NEXT_INSN (insn));
7841 delay_insn_deleted = 1;
7844 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7845 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7846 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7848 if (sibcall)
7850 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7851 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7852 output_asm_insn ("bve (%%r1)", xoperands);
7854 else
7856 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7857 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7858 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7859 delay_slot_filled = 1;
7862 else
7864 int indirect_call = 0;
7866 /* Emit a long call. There are several different sequences
7867 of increasing length and complexity. In most cases,
7868 they don't allow an instruction in the delay slot. */
7869 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7870 && !TARGET_LONG_PIC_SDIFF_CALL
7871 && !(TARGET_GAS && !TARGET_SOM
7872 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7873 && !TARGET_64BIT)
7874 indirect_call = 1;
7876 if (seq_length != 0
7877 && ! JUMP_P (NEXT_INSN (insn))
7878 && !sibcall
7879 && (!TARGET_PA_20
7880 || indirect_call
7881 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7883 /* A non-jump insn in the delay slot. By definition we can
7884 emit this insn before the call (and in fact before argument
7885 relocation). */
7886 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7887 NULL);
7889 /* Now delete the delay insn. */
7890 SET_INSN_DELETED (NEXT_INSN (insn));
7891 delay_insn_deleted = 1;
7894 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7896 /* This is the best sequence for making long calls in
7897 non-pic code. Unfortunately, GNU ld doesn't provide
7898 the stub needed for external calls, and GAS's support
7899 for this with the SOM linker is buggy. It is safe
7900 to use this for local calls. */
7901 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7902 if (sibcall)
7903 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7904 else
7906 if (TARGET_PA_20)
7907 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7908 xoperands);
7909 else
7910 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7912 output_asm_insn ("copy %%r31,%%r2", xoperands);
7913 delay_slot_filled = 1;
7916 else
7918 if (TARGET_LONG_PIC_SDIFF_CALL)
7920 /* The HP assembler and linker can handle relocations
7921 for the difference of two symbols. The HP assembler
7922 recognizes the sequence as a pc-relative call and
7923 the linker provides stubs when needed. */
7924 xoperands[1] = gen_label_rtx ();
7925 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7926 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7927 targetm.asm_out.internal_label (asm_out_file, "L",
7928 CODE_LABEL_NUMBER (xoperands[1]));
7929 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7931 else if (TARGET_GAS && !TARGET_SOM
7932 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7934 /* GAS currently can't generate the relocations that
7935 are needed for the SOM linker under HP-UX using this
7936 sequence. The GNU linker doesn't generate the stubs
7937 that are needed for external calls on TARGET_ELF32
7938 with this sequence. For now, we have to use a
7939 longer plabel sequence when using GAS. */
7940 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7941 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7942 xoperands);
7943 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7944 xoperands);
7946 else
7948 /* Emit a long plabel-based call sequence. This is
7949 essentially an inline implementation of $$dyncall.
7950 We don't actually try to call $$dyncall as this is
7951 as difficult as calling the function itself. */
7952 xoperands[0] = pa_get_deferred_plabel (call_dest);
7953 xoperands[1] = gen_label_rtx ();
7955 /* Since the call is indirect, FP arguments in registers
7956 need to be copied to the general registers. Then, the
7957 argument relocation stub will copy them back. */
7958 if (TARGET_SOM)
7959 copy_fp_args (insn);
7961 if (flag_pic)
7963 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7964 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7965 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7967 else
7969 output_asm_insn ("addil LR'%0-$global$,%%r27",
7970 xoperands);
7971 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7972 xoperands);
7975 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7976 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7977 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7978 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7980 if (!sibcall && !TARGET_PA_20)
7982 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7983 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7984 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7985 else
7986 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7990 if (TARGET_PA_20)
7992 if (sibcall)
7993 output_asm_insn ("bve (%%r1)", xoperands);
7994 else
7996 if (indirect_call)
7998 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7999 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8000 delay_slot_filled = 1;
8002 else
8003 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8006 else
8008 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8009 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8010 xoperands);
8012 if (sibcall)
8014 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8015 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8016 else
8017 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8019 else
8021 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8022 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8023 else
8024 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8026 if (indirect_call)
8027 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8028 else
8029 output_asm_insn ("copy %%r31,%%r2", xoperands);
8030 delay_slot_filled = 1;
8037 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8038 output_asm_insn ("nop", xoperands);
8040 /* We are done if there isn't a jump in the delay slot. */
8041 if (seq_length == 0
8042 || delay_insn_deleted
8043 || ! JUMP_P (NEXT_INSN (insn)))
8044 return "";
8046 /* A sibcall should never have a branch in the delay slot. */
8047 gcc_assert (!sibcall);
8049 /* This call has an unconditional jump in its delay slot. */
8050 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8052 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8054 /* See if the return address can be adjusted. Use the containing
8055 sequence insn's address. This would break the regular call/return
8056 relationship assumed by the table-based EH unwinder, so only do that
8057 if the call is not possibly throwing. */
8058 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
8059 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8060 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8062 if (VAL_14_BITS_P (distance)
8063 && !(can_throw_internal (insn) || can_throw_external (insn)))
8065 xoperands[1] = gen_label_rtx ();
8066 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8067 targetm.asm_out.internal_label (asm_out_file, "L",
8068 CODE_LABEL_NUMBER (xoperands[1]));
8070 else
8071 output_asm_insn ("nop\n\tb,n %0", xoperands);
8073 else
8074 output_asm_insn ("b,n %0", xoperands);
8076 /* Delete the jump. */
8077 SET_INSN_DELETED (NEXT_INSN (insn));
8079 return "";
8082 /* Return the attribute length of the indirect call instruction INSN.
8083 The length must match the code generated by pa_output_indirect_call.
8084 The returned length includes the delay slot. Currently, the delay
8085 slot of an indirect call sequence is not exposed and it is used by
8086 the sequence itself. */
8089 pa_attr_length_indirect_call (rtx insn)
8091 unsigned long distance = -1;
8092 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8094 if (INSN_ADDRESSES_SET_P ())
8096 distance = (total + insn_current_reference_address (insn));
8097 if (distance < total)
8098 distance = -1;
8101 if (TARGET_64BIT)
8102 return 12;
8104 if (TARGET_FAST_INDIRECT_CALLS
8105 || (!TARGET_LONG_CALLS
8106 && !TARGET_PORTABLE_RUNTIME
8107 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8108 || distance < MAX_PCREL17F_OFFSET)))
8109 return 8;
8111 if (flag_pic)
8112 return 20;
8114 if (TARGET_PORTABLE_RUNTIME)
8115 return 16;
8117 /* Out of reach, can use ble. */
8118 return 12;
8121 const char *
8122 pa_output_indirect_call (rtx insn, rtx call_dest)
8124 rtx xoperands[1];
8126 if (TARGET_64BIT)
8128 xoperands[0] = call_dest;
8129 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8130 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8131 return "";
8134 /* First the special case for kernels, level 0 systems, etc. */
8135 if (TARGET_FAST_INDIRECT_CALLS)
8136 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8138 /* Now the normal case -- we can reach $$dyncall directly or
8139 we're sure that we can get there via a long-branch stub.
8141 No need to check target flags as the length uniquely identifies
8142 the remaining cases. */
8143 if (pa_attr_length_indirect_call (insn) == 8)
8145 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8146 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8147 variant of the B,L instruction can't be used on the SOM target. */
8148 if (TARGET_PA_20 && !TARGET_SOM)
8149 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8150 else
8151 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8154 /* Long millicode call, but we are not generating PIC or portable runtime
8155 code. */
8156 if (pa_attr_length_indirect_call (insn) == 12)
8157 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8159 /* Long millicode call for portable runtime. */
8160 if (pa_attr_length_indirect_call (insn) == 16)
8161 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8163 /* We need a long PIC call to $$dyncall. */
8164 xoperands[0] = NULL_RTX;
8165 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8166 if (TARGET_SOM || !TARGET_GAS)
8168 xoperands[0] = gen_label_rtx ();
8169 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8170 targetm.asm_out.internal_label (asm_out_file, "L",
8171 CODE_LABEL_NUMBER (xoperands[0]));
8172 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8174 else
8176 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8177 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8178 xoperands);
8180 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8181 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8182 return "";
8185 /* In HPUX 8.0's shared library scheme, special relocations are needed
8186 for function labels if they might be passed to a function
8187 in a shared library (because shared libraries don't live in code
8188 space), and special magic is needed to construct their address. */
8190 void
8191 pa_encode_label (rtx sym)
8193 const char *str = XSTR (sym, 0);
8194 int len = strlen (str) + 1;
8195 char *newstr, *p;
8197 p = newstr = XALLOCAVEC (char, len + 1);
8198 *p++ = '@';
8199 strcpy (p, str);
8201 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8204 static void
8205 pa_encode_section_info (tree decl, rtx rtl, int first)
8207 int old_referenced = 0;
8209 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8210 old_referenced
8211 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8213 default_encode_section_info (decl, rtl, first);
8215 if (first && TEXT_SPACE_P (decl))
8217 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8218 if (TREE_CODE (decl) == FUNCTION_DECL)
8219 pa_encode_label (XEXP (rtl, 0));
8221 else if (old_referenced)
8222 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8225 /* This is sort of the inverse of pa_encode_section_info. */
8227 static const char *
8228 pa_strip_name_encoding (const char *str)
8230 str += (*str == '@');
8231 str += (*str == '*');
8232 return str;
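/* Illustrative examples (not from the original sources) of the round
   trip between the two routines above:

	pa_encode_label:	 "foo"	-> "@foo"
	pa_strip_name_encoding:	 "@foo"	-> "foo"
	pa_strip_name_encoding:	 "@*f"	-> "f"

   At most one '@' (code-space marker) and one '*' (verbatim assembler
   name marker) are removed, in that order.  */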
8235 /* Returns 1 if OP is a function label involved in a simple addition
8236 with a constant. Used to keep certain patterns from matching
8237 during instruction combination. */
8239 pa_is_function_label_plus_const (rtx op)
8241 /* Strip off any CONST. */
8242 if (GET_CODE (op) == CONST)
8243 op = XEXP (op, 0);
8245 return (GET_CODE (op) == PLUS
8246 && function_label_operand (XEXP (op, 0), VOIDmode)
8247 && GET_CODE (XEXP (op, 1)) == CONST_INT);
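/* For instance (illustrative), assuming "@f" is an encoded function
   label accepted by function_label_operand, both of the following
   satisfy the predicate above:

	(plus (symbol_ref "@f") (const_int 4))
	(const (plus (symbol_ref "@f") (const_int 4)))  */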
8250 /* Output assembly code for a thunk to FUNCTION. */
8252 static void
8253 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8254 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8255 tree function)
8257 static unsigned int current_thunk_number;
8258 int val_14 = VAL_14_BITS_P (delta);
8259 unsigned int old_last_address = last_address, nbytes = 0;
8260 char label[16];
8261 rtx xoperands[4];
8263 xoperands[0] = XEXP (DECL_RTL (function), 0);
8264 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8265 xoperands[2] = GEN_INT (delta);
8267 final_start_function (emit_barrier (), file, 1);
8269 /* Output the thunk. We know that the function is in the same
8270 translation unit (i.e., the same space) as the thunk, and that
8271 thunks are output after their method. Thus, we don't need an
8272 external branch to reach the function. With SOM and GAS,
8273 functions and thunks are effectively in different sections.
8274 Thus, we can always use an IA-relative branch and the linker
8275 will add a long branch stub if necessary.
8277 However, we have to be careful when generating PIC code on the
8278 SOM port to ensure that the sequence does not transfer to an
8279 import stub for the target function as this could clobber the
8280 return value saved at SP-24. This would also apply to the
8281 32-bit linux port if the multi-space model is implemented. */
8282 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8283 && !(flag_pic && TREE_PUBLIC (function))
8284 && (TARGET_GAS || last_address < 262132))
8285 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8286 && ((targetm_common.have_named_sections
8287 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8288 /* The GNU 64-bit linker has rather poor stub management.
8289 So, we use a long branch from thunks that aren't in
8290 the same section as the target function. */
8291 && ((!TARGET_64BIT
8292 && (DECL_SECTION_NAME (thunk_fndecl)
8293 != DECL_SECTION_NAME (function)))
8294 || ((DECL_SECTION_NAME (thunk_fndecl)
8295 == DECL_SECTION_NAME (function))
8296 && last_address < 262132)))
8297 || (targetm_common.have_named_sections
8298 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8299 && DECL_SECTION_NAME (function) == NULL
8300 && last_address < 262132)
8301 || (!targetm_common.have_named_sections
8302 && last_address < 262132))))
8304 if (!val_14)
8305 output_asm_insn ("addil L'%2,%%r26", xoperands);
8307 output_asm_insn ("b %0", xoperands);
8309 if (val_14)
8311 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8312 nbytes += 8;
8314 else
8316 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8317 nbytes += 12;
8320 else if (TARGET_64BIT)
8322 /* We only have one call-clobbered scratch register, so we can't
8323 make use of the delay slot if delta doesn't fit in 14 bits. */
8324 if (!val_14)
8326 output_asm_insn ("addil L'%2,%%r26", xoperands);
8327 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8330 output_asm_insn ("b,l .+8,%%r1", xoperands);
8332 if (TARGET_GAS)
8334 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8335 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8337 else
8339 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8340 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8343 if (val_14)
8345 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8346 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8347 nbytes += 20;
8349 else
8351 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8352 nbytes += 24;
8355 else if (TARGET_PORTABLE_RUNTIME)
8357 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8358 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8360 if (!val_14)
8361 output_asm_insn ("addil L'%2,%%r26", xoperands);
8363 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8365 if (val_14)
8367 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8368 nbytes += 16;
8370 else
8372 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8373 nbytes += 20;
8376 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8378 /* The function is accessible from outside this module. The only
8379 way to avoid an import stub between the thunk and function is to
8380 call the function directly with an indirect sequence similar to
8381 that used by $$dyncall. This is possible because $$dyncall acts
8382 as the import stub in an indirect call. */
8383 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8384 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8385 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8386 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8387 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8388 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8389 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8390 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8391 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8393 if (!val_14)
8395 output_asm_insn ("addil L'%2,%%r26", xoperands);
8396 nbytes += 4;
8399 if (TARGET_PA_20)
8401 output_asm_insn ("bve (%%r22)", xoperands);
8402 nbytes += 36;
8404 else if (TARGET_NO_SPACE_REGS)
8406 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8407 nbytes += 36;
8409 else
8411 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8412 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8413 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8414 nbytes += 44;
8417 if (val_14)
8418 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8419 else
8420 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8422 else if (flag_pic)
8424 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8426 if (TARGET_SOM || !TARGET_GAS)
8428 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8429 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8431 else
8433 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8434 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8437 if (!val_14)
8438 output_asm_insn ("addil L'%2,%%r26", xoperands);
8440 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8442 if (val_14)
8444 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8445 nbytes += 20;
8447 else
8449 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8450 nbytes += 24;
8453 else
8455 if (!val_14)
8456 output_asm_insn ("addil L'%2,%%r26", xoperands);
8458 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8459 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8461 if (val_14)
8463 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8464 nbytes += 12;
8466 else
8468 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8469 nbytes += 16;
8473 final_end_function ();
8475 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8477 switch_to_section (data_section);
8478 output_asm_insn (".align 4", xoperands);
8479 ASM_OUTPUT_LABEL (file, label);
8480 output_asm_insn (".word P'%0", xoperands);
8483 current_thunk_number++;
8484 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8485 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8486 last_address += nbytes;
8487 if (old_last_address > last_address)
8488 last_address = UINT_MAX;
8489 update_total_code_bytes (nbytes);
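/* A minimal sketch (illustrative only, not used by the port) of the
   power-of-two round-up applied to NBYTES above; FUNCTION_BOUNDARY /
   BITS_PER_UNIT plays the part of ALIGN.  */

static inline unsigned int
pa_example_round_up (unsigned int nbytes, unsigned int align)
{
  /* ALIGN must be a power of two; e.g., pa_example_round_up (13, 8)
     yields 16.  */
  return (nbytes + align - 1) & ~(align - 1);
}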
8492 /* Only direct calls to static functions are allowed to be sibling (tail)
8493 call optimized.
8495 This restriction is necessary because some linker generated stubs will
8496 store return pointers into rp' in some cases which might clobber a
8497 live value already in rp'.
8499 In a sibcall the current function and the target function share stack
8500 space. Thus if the path to the current function and the path to the
8501 target function save a value in rp', they save the value into the
8502 same stack slot, which has undesirable consequences.
8504 Because of the deferred binding nature of shared libraries any function
8505 with external scope could be in a different load module and thus require
8506 rp' to be saved when calling that function. So sibcall optimizations
8507 can only be safe for static functions.
8509 Note that GCC never needs return value relocations, so we don't have to
8510 worry about static calls with return value relocations (which require
8511 saving rp').
8513 It is safe to perform a sibcall optimization when the target function
8514 will never return. */
8515 static bool
8516 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8518 if (TARGET_PORTABLE_RUNTIME)
8519 return false;
8521 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8522 single subspace mode and the call is not indirect. As far as I know,
8523 there is no operating system support for the multiple subspace mode.
8524 It might be possible to support indirect calls if we didn't use
8525 $$dyncall (see the indirect sequence generated in pa_output_call). */
8526 if (TARGET_ELF32)
8527 return (decl != NULL_TREE);
8529 /* Sibcalls are not ok because the arg pointer register is not a fixed
8530 register. This prevents the sibcall optimization from occurring. In
8531 addition, there are problems with stub placement using GNU ld. This
8532 is because a normal sibcall branch uses a 17-bit relocation while
8533 a regular call branch uses a 22-bit relocation. As a result, more
8534 care needs to be taken in the placement of long-branch stubs. */
8535 if (TARGET_64BIT)
8536 return false;
8538 /* Sibcalls are only ok within a translation unit. */
8539 return (decl && !TREE_PUBLIC (decl));
8542 /* ??? Addition is not commutative on the PA due to the weird implicit
8543 space register selection rules for memory addresses. Therefore, we
8544 don't consider a + b == b + a, as this might be inside a MEM. */
8545 static bool
8546 pa_commutative_p (const_rtx x, int outer_code)
8548 return (COMMUTATIVE_P (x)
8549 && (TARGET_NO_SPACE_REGS
8550 || (outer_code != UNKNOWN && outer_code != MEM)
8551 || GET_CODE (x) != PLUS));
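/* For example (illustrative): inside a MEM, the space register for an
   indexed access is selected from the high-order bits of the base
   operand, so rewriting

	(mem (plus (reg %r26) (reg %r25)))

   as (mem (plus (reg %r25) (reg %r26))) can change which space the
   access hits.  Outside a MEM (outer_code != MEM), or when space
   registers are disabled, the swap is harmless and the hook above
   allows it.  */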
8554 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8555 use in fmpyadd instructions. */
8557 pa_fmpyaddoperands (rtx *operands)
8559 enum machine_mode mode = GET_MODE (operands[0]);
8561 /* Must be a floating point mode. */
8562 if (mode != SFmode && mode != DFmode)
8563 return 0;
8565 /* All modes must be the same. */
8566 if (! (mode == GET_MODE (operands[1])
8567 && mode == GET_MODE (operands[2])
8568 && mode == GET_MODE (operands[3])
8569 && mode == GET_MODE (operands[4])
8570 && mode == GET_MODE (operands[5])))
8571 return 0;
8573 /* All operands must be registers. */
8574 if (! (GET_CODE (operands[1]) == REG
8575 && GET_CODE (operands[2]) == REG
8576 && GET_CODE (operands[3]) == REG
8577 && GET_CODE (operands[4]) == REG
8578 && GET_CODE (operands[5]) == REG))
8579 return 0;
8581 /* Only 2 real operands to the addition. One of the input operands must
8582 be the same as the output operand. */
8583 if (! rtx_equal_p (operands[3], operands[4])
8584 && ! rtx_equal_p (operands[3], operands[5]))
8585 return 0;
8587 /* Inout operand of add cannot conflict with any operands from multiply. */
8588 if (rtx_equal_p (operands[3], operands[0])
8589 || rtx_equal_p (operands[3], operands[1])
8590 || rtx_equal_p (operands[3], operands[2]))
8591 return 0;
8593 /* The multiply destination cannot feed the addition input operands. */
8594 if (rtx_equal_p (operands[4], operands[0])
8595 || rtx_equal_p (operands[5], operands[0]))
8596 return 0;
8598 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8599 if (mode == SFmode
8600 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8601 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8602 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8603 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8604 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8605 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8606 return 0;
8608 /* Passed. Operands are suitable for fmpyadd. */
8609 return 1;
8612 #if !defined(USE_COLLECT2)
8613 static void
8614 pa_asm_out_constructor (rtx symbol, int priority)
8616 if (!function_label_operand (symbol, VOIDmode))
8617 pa_encode_label (symbol);
8619 #ifdef CTORS_SECTION_ASM_OP
8620 default_ctor_section_asm_out_constructor (symbol, priority);
8621 #else
8622 # ifdef TARGET_ASM_NAMED_SECTION
8623 default_named_section_asm_out_constructor (symbol, priority);
8624 # else
8625 default_stabs_asm_out_constructor (symbol, priority);
8626 # endif
8627 #endif
8630 static void
8631 pa_asm_out_destructor (rtx symbol, int priority)
8633 if (!function_label_operand (symbol, VOIDmode))
8634 pa_encode_label (symbol);
8636 #ifdef DTORS_SECTION_ASM_OP
8637 default_dtor_section_asm_out_destructor (symbol, priority);
8638 #else
8639 # ifdef TARGET_ASM_NAMED_SECTION
8640 default_named_section_asm_out_destructor (symbol, priority);
8641 # else
8642 default_stabs_asm_out_destructor (symbol, priority);
8643 # endif
8644 #endif
8646 #endif
8648 /* This function places uninitialized global data in the bss section.
8649 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8650 function on the SOM port to prevent uninitialized global data from
8651 being placed in the data section. */
8653 void
8654 pa_asm_output_aligned_bss (FILE *stream,
8655 const char *name,
8656 unsigned HOST_WIDE_INT size,
8657 unsigned int align)
8659 switch_to_section (bss_section);
8660 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8662 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8663 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8664 #endif
8666 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8667 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8668 #endif
8670 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8671 ASM_OUTPUT_LABEL (stream, name);
8672 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8675 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8676 that doesn't allow the alignment of global common storage to be directly
8677 specified. The SOM linker aligns common storage based on the rounded
8678 value of the NUM_BYTES parameter in the .comm directive. It's not
8679 possible to use the .align directive as it doesn't affect the alignment
8680 of the label associated with a .comm directive. */
8682 void
8683 pa_asm_output_aligned_common (FILE *stream,
8684 const char *name,
8685 unsigned HOST_WIDE_INT size,
8686 unsigned int align)
8688 unsigned int max_common_align;
8690 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8691 if (align > max_common_align)
8693 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8694 "for global common data. Using %u",
8695 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8696 align = max_common_align;
8699 switch_to_section (bss_section);
8701 assemble_name (stream, name);
8702 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8703 MAX (size, align / BITS_PER_UNIT));
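/* For example (illustrative): a request for 1 byte of common storage
   with 128-bit (16-byte) alignment is emitted as

	foo	.comm 16

   and the SOM linker then rounds and aligns the storage accordingly;
   the size, not an .align directive, carries the alignment request.  */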
8706 /* We can't use .comm for local common storage as the SOM linker effectively
8707 treats the symbol as universal and uses the same storage for local symbols
8708 with the same name in different object files. The .block directive
8709 reserves an uninitialized block of storage. However, it's not common
8710 storage. Fortunately, GCC never requests common storage with the same
8711 name in any given translation unit. */
8713 void
8714 pa_asm_output_aligned_local (FILE *stream,
8715 const char *name,
8716 unsigned HOST_WIDE_INT size,
8717 unsigned int align)
8719 switch_to_section (bss_section);
8720 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8722 #ifdef LOCAL_ASM_OP
8723 fprintf (stream, "%s", LOCAL_ASM_OP);
8724 assemble_name (stream, name);
8725 fprintf (stream, "\n");
8726 #endif
8728 ASM_OUTPUT_LABEL (stream, name);
8729 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8732 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8733 use in fmpysub instructions. */
8735 pa_fmpysuboperands (rtx *operands)
8737 enum machine_mode mode = GET_MODE (operands[0]);
8739 /* Must be a floating point mode. */
8740 if (mode != SFmode && mode != DFmode)
8741 return 0;
8743 /* All modes must be the same. */
8744 if (! (mode == GET_MODE (operands[1])
8745 && mode == GET_MODE (operands[2])
8746 && mode == GET_MODE (operands[3])
8747 && mode == GET_MODE (operands[4])
8748 && mode == GET_MODE (operands[5])))
8749 return 0;
8751 /* All operands must be registers. */
8752 if (! (GET_CODE (operands[1]) == REG
8753 && GET_CODE (operands[2]) == REG
8754 && GET_CODE (operands[3]) == REG
8755 && GET_CODE (operands[4]) == REG
8756 && GET_CODE (operands[5]) == REG))
8757 return 0;
8759 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8760 operation, so operands[4] must be the same as operands[3]. */
8761 if (! rtx_equal_p (operands[3], operands[4]))
8762 return 0;
8764 /* The multiply destination cannot feed an input of the subtraction. */
8765 if (rtx_equal_p (operands[5], operands[0]))
8766 return 0;
8768 /* Inout operand of sub cannot conflict with any operands from multiply. */
8769 if (rtx_equal_p (operands[3], operands[0])
8770 || rtx_equal_p (operands[3], operands[1])
8771 || rtx_equal_p (operands[3], operands[2]))
8772 return 0;
8774 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8775 if (mode == SFmode
8776 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8777 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8778 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8779 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8780 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8781 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8782 return 0;
8784 /* Passed. Operands are suitable for fmpysub. */
8785 return 1;
8788 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8789 constants for shadd instructions. */
8791 pa_shadd_constant_p (int val)
8793 if (val == 2 || val == 4 || val == 8)
8794 return 1;
8795 else
8796 return 0;
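/* E.g. (illustrative), an address computation x + 4*y maps onto a
   single shift-and-add instruction:

	sh2add %ry,%rx,%rt	; %rt = (%ry << 2) + %rx

   hence the valid scale factors 2, 4 and 8, i.e., shifts of 1, 2
   and 3.  */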
8799 /* Return TRUE if INSN branches forward. */
8801 static bool
8802 forward_branch_p (rtx insn)
8804 rtx lab = JUMP_LABEL (insn);
8806 /* The INSN must have a jump label. */
8807 gcc_assert (lab != NULL_RTX);
8809 if (INSN_ADDRESSES_SET_P ())
8810 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8812 while (insn)
8814 if (insn == lab)
8815 return true;
8816 else
8817 insn = NEXT_INSN (insn);
8820 return false;
8823 /* Return 1 if INSN is in the delay slot of a call instruction. */
8825 pa_jump_in_call_delay (rtx insn)
8828 if (! JUMP_P (insn))
8829 return 0;
8831 if (PREV_INSN (insn)
8832 && PREV_INSN (PREV_INSN (insn))
8833 && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
8835 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8837 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8838 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8841 else
8842 return 0;
8845 /* Output an unconditional move and branch insn. */
8847 const char *
8848 pa_output_parallel_movb (rtx *operands, rtx insn)
8850 int length = get_attr_length (insn);
8852 /* These are the cases in which we win. */
8853 if (length == 4)
8854 return "mov%I1b,tr %1,%0,%2";
8856 /* None of the following cases win, but they don't lose either. */
8857 if (length == 8)
8859 if (dbr_sequence_length () == 0)
8861 /* Nothing in the delay slot, fake it by putting the combined
8862 insn (the copy or ldi) in the delay slot of a bl. */
8863 if (GET_CODE (operands[1]) == CONST_INT)
8864 return "b %2\n\tldi %1,%0";
8865 else
8866 return "b %2\n\tcopy %1,%0";
8868 else
8870 /* Something in the delay slot, but we've got a long branch. */
8871 if (GET_CODE (operands[1]) == CONST_INT)
8872 return "ldi %1,%0\n\tb %2";
8873 else
8874 return "copy %1,%0\n\tb %2";
8878 if (GET_CODE (operands[1]) == CONST_INT)
8879 output_asm_insn ("ldi %1,%0", operands);
8880 else
8881 output_asm_insn ("copy %1,%0", operands);
8882 return pa_output_lbranch (operands[2], insn, 1);
8885 /* Output an unconditional add and branch insn. */
8887 const char *
8888 pa_output_parallel_addb (rtx *operands, rtx insn)
8890 int length = get_attr_length (insn);
8892 /* To make life easy we want operands[0] to be the shared input/output
8893 operand and operands[1] to be the read-only operand. */
8894 if (operands[0] == operands[1])
8895 operands[1] = operands[2];
8897 /* These are the cases in which we win. */
8898 if (length == 4)
8899 return "add%I1b,tr %1,%0,%3";
8901 /* None of the following cases win, but they don't lose either. */
8902 if (length == 8)
8904 if (dbr_sequence_length () == 0)
8905 /* Nothing in the delay slot, fake it by putting the combined
8906 insn (the add or addi) in the delay slot of a bl. */
8907 return "b %3\n\tadd%I1 %1,%0,%0";
8908 else
8909 /* Something in the delay slot, but we've got a long branch. */
8910 return "add%I1 %1,%0,%0\n\tb %3";
8913 output_asm_insn ("add%I1 %1,%0,%0", operands);
8914 return pa_output_lbranch (operands[3], insn, 1);
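/* Illustrative examples (not from the original sources): with the
   "true" condition, the branch in movb/addb is unconditional, so

	movb,tr %r4,%r3,L$5	; %r3 = %r4, then branch to L$5
	addb,tr %r2,%r3,L$5	; %r3 += %r2, then branch to L$5

   each perform an independent copy/add plus an unconditional branch
   in a single four-byte insn -- the length == 4 cases above.  */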
8917 /* Return nonzero if INSN (a jump insn) immediately follows a call
8918 to a named function. This is used to avoid filling the delay slot
8919 of the jump since it can usually be eliminated by modifying RP in
8920 the delay slot of the call. */
8923 pa_following_call (rtx insn)
8925 if (! TARGET_JUMP_IN_DELAY)
8926 return 0;
8928 /* Find the previous real insn, skipping NOTEs. */
8929 insn = PREV_INSN (insn);
8930 while (insn && NOTE_P (insn))
8931 insn = PREV_INSN (insn);
8933 /* Check for CALL_INSNs and millicode calls. */
8934 if (insn
8935 && ((CALL_P (insn)
8936 && get_attr_type (insn) != TYPE_DYNCALL)
8937 || (NONJUMP_INSN_P (insn)
8938 && GET_CODE (PATTERN (insn)) != SEQUENCE
8939 && GET_CODE (PATTERN (insn)) != USE
8940 && GET_CODE (PATTERN (insn)) != CLOBBER
8941 && get_attr_type (insn) == TYPE_MILLI)))
8942 return 1;
8944 return 0;
8947 /* We use this hook to perform a PA-specific optimization that is difficult
8948 to do in earlier passes.
8950 We surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8951 insns. Those insns mark where we should emit .begin_brtab and
8952 .end_brtab directives when using GAS. This allows for better link
8953 time optimizations. */
8955 static void
8956 pa_reorg (void)
8958 rtx insn;
8960 remove_useless_addtr_insns (1);
8962 if (pa_cpu < PROCESSOR_8000)
8963 pa_combine_instructions ();
8965 /* Still need brtab marker insns. FIXME: the presence of these
8966 markers disables output of the branch table to readonly memory,
8967 and any alignment directives that might be needed. Possibly,
8968 the begin_brtab insn should be output before the label for the
8969 table. This doesn't matter at the moment since the tables are
8970 always output in the text section. */
8971 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8973 /* Find an ADDR_VEC insn. */
8974 if (! JUMP_TABLE_DATA_P (insn))
8975 continue;
8977 /* Now generate markers for the beginning and end of the
8978 branch table. */
8979 emit_insn_before (gen_begin_brtab (), insn);
8980 emit_insn_after (gen_end_brtab (), insn);
8984 /* The PA has a number of odd instructions which can perform multiple
8985 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8986 it may be profitable to combine two instructions into one instruction
8987 with two outputs. It's not profitable on PA2.0 machines because the
8988 two outputs would take two slots in the reorder buffers.
8990 This routine finds instructions which can be combined and combines
8991 them. We only support some of the potential combinations, and we
8992 only try common ways to find suitable instructions.
8994 * addb can add two registers or a register and a small integer
8995 and jump to a nearby (+-8k) location. Normally the jump to the
8996 nearby location is conditional on the result of the add, but by
8997 using the "true" condition we can make the jump unconditional.
8998 Thus addb can perform two independent operations in one insn.
9000 * movb is similar to addb in that it can perform a reg->reg
9001 or small immediate->reg copy and jump to a nearby (+-8k) location.
9003 * fmpyadd and fmpysub can perform a FP multiply and either an
9004 FP add or FP sub if the operands of the multiply and add/sub are
9005 independent (there are other minor restrictions). Note both
9006 the fmpy and fadd/fsub can in theory move to better spots according
9007 to data dependencies, but for now we require the fmpy stay at a
9008 fixed location.
9010 * Many of the memory operations can perform pre & post updates
9011 of index registers. GCC's pre/post increment/decrement addressing
9012 is far too simple to take advantage of all the possibilities. This
9013 pass may not be suitable since those insns may not be independent.
9015 * comclr can compare two integer registers, or a register and a small
9016 immediate, nullify the following instruction and zero some other register. This
9017 is more difficult to use as it's harder to find an insn which
9018 will generate a comclr than finding something like an unconditional
9019 branch. (conditional moves & long branches create comclr insns).
9021 * Most arithmetic operations can conditionally skip the next
9022 instruction. They can be viewed as "perform this operation
9023 and conditionally jump to this nearby location" (where nearby
9024 is a few insns away). These are difficult to use due to the
9025 branch length restrictions. */
9027 static void
9028 pa_combine_instructions (void)
9030 rtx anchor, new_rtx;
9032 /* This can get expensive since the basic algorithm is on the
9033 order of O(n^2) (or worse). Only do it for -O2 or higher
9034 levels of optimization. */
9035 if (optimize < 2)
9036 return;
9038 /* Walk down the list of insns looking for "anchor" insns which
9039 may be combined with "floating" insns. As the name implies,
9040 "anchor" instructions don't move, while "floating" insns may
9041 move around. */
9042 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9043 new_rtx = make_insn_raw (new_rtx);
9045 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9047 enum attr_pa_combine_type anchor_attr;
9048 enum attr_pa_combine_type floater_attr;
9050 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9051 Also ignore any special USE insns. */
9052 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9053 || GET_CODE (PATTERN (anchor)) == USE
9054 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9055 continue;
9057 anchor_attr = get_attr_pa_combine_type (anchor);
9058 /* See if anchor is an insn suitable for combination. */
9059 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9060 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9061 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9062 && ! forward_branch_p (anchor)))
9064 rtx floater;
9066 for (floater = PREV_INSN (anchor);
9067 floater;
9068 floater = PREV_INSN (floater))
9070 if (NOTE_P (floater)
9071 || (NONJUMP_INSN_P (floater)
9072 && (GET_CODE (PATTERN (floater)) == USE
9073 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9074 continue;
9076 /* Anything except a regular INSN will stop our search. */
9077 if (! NONJUMP_INSN_P (floater))
9079 floater = NULL_RTX;
9080 break;
9083 /* See if FLOATER is suitable for combination with the
9084 anchor. */
9085 floater_attr = get_attr_pa_combine_type (floater);
9086 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9087 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9088 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9089 && floater_attr == PA_COMBINE_TYPE_FMPY))
9091 /* If ANCHOR and FLOATER can be combined, then we're
9092 done with this pass. */
9093 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9094 SET_DEST (PATTERN (floater)),
9095 XEXP (SET_SRC (PATTERN (floater)), 0),
9096 XEXP (SET_SRC (PATTERN (floater)), 1)))
9097 break;
9100 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9101 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9103 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9105 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9106 SET_DEST (PATTERN (floater)),
9107 XEXP (SET_SRC (PATTERN (floater)), 0),
9108 XEXP (SET_SRC (PATTERN (floater)), 1)))
9109 break;
9111 else
9113 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9114 SET_DEST (PATTERN (floater)),
9115 SET_SRC (PATTERN (floater)),
9116 SET_SRC (PATTERN (floater))))
9117 break;
9122 /* If we didn't find anything on the backwards scan try forwards. */
9123 if (!floater
9124 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9125 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9127 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9129 if (NOTE_P (floater)
9130 || (NONJUMP_INSN_P (floater)
9131 && (GET_CODE (PATTERN (floater)) == USE
9132 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9134 continue;
9136 /* Anything except a regular INSN will stop our search. */
9137 if (! NONJUMP_INSN_P (floater))
9139 floater = NULL_RTX;
9140 break;
9143 /* See if FLOATER is suitable for combination with the
9144 anchor. */
9145 floater_attr = get_attr_pa_combine_type (floater);
9146 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9147 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9148 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9149 && floater_attr == PA_COMBINE_TYPE_FMPY))
9151 /* If ANCHOR and FLOATER can be combined, then we're
9152 done with this pass. */
9153 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9154 SET_DEST (PATTERN (floater)),
9155 XEXP (SET_SRC (PATTERN (floater)),
9157 XEXP (SET_SRC (PATTERN (floater)),
9158 1)))
9159 break;
9164 /* FLOATER will be nonzero if we found a suitable floating
9165 insn for combination with ANCHOR. */
9166 if (floater
9167 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9168 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9170 /* Emit the new instruction and delete the old anchor. */
9171 emit_insn_before (gen_rtx_PARALLEL
9172 (VOIDmode,
9173 gen_rtvec (2, PATTERN (anchor),
9174 PATTERN (floater))),
9175 anchor);
9177 SET_INSN_DELETED (anchor);
9179 /* Emit a special USE insn for FLOATER, then delete
9180 the floating insn. */
9181 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9182 delete_insn (floater);
9184 continue;
9186 else if (floater
9187 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9189 rtx temp;
9190 /* Emit the new_jump instruction and delete the old anchor. */
9191 temp
9192 = emit_jump_insn_before (gen_rtx_PARALLEL
9193 (VOIDmode,
9194 gen_rtvec (2, PATTERN (anchor),
9195 PATTERN (floater))),
9196 anchor);
9198 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9199 SET_INSN_DELETED (anchor);
9201 /* Emit a special USE insn for FLOATER, then delete
9202 the floating insn. */
9203 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9204 delete_insn (floater);
9205 continue;
9211 static int
9212 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9213 rtx src1, rtx src2)
9215 int insn_code_number;
9216 rtx start, end;
9218 /* Create a PARALLEL with the patterns of ANCHOR and
9219 FLOATER, try to recognize it, then test constraints
9220 for the resulting pattern.
9222 If the pattern doesn't match or the constraints
9223 aren't met keep searching for a suitable floater
9224 insn. */
9225 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9226 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9227 INSN_CODE (new_rtx) = -1;
9228 insn_code_number = recog_memoized (new_rtx);
9229 if (insn_code_number < 0
9230 || (extract_insn (new_rtx), ! constrain_operands (1)))
9231 return 0;
9233 if (reversed)
9235 start = anchor;
9236 end = floater;
9238 else
9240 start = floater;
9241 end = anchor;
9244 /* There are up to three operands to consider: one
9245 output and two inputs.
9247 The output must not be used between FLOATER & ANCHOR
9248 exclusive. The inputs must not be set between
9249 FLOATER and ANCHOR exclusive. */
9251 if (reg_used_between_p (dest, start, end))
9252 return 0;
9254 if (reg_set_between_p (src1, start, end))
9255 return 0;
9257 if (reg_set_between_p (src2, start, end))
9258 return 0;
9260 /* If we get here, then everything is good. */
9261 return 1;
9264 /* Return nonzero if references for INSN are delayed.
9266 Millicode insns are actually function calls with some special
9267 constraints on arguments and register usage.
9269 Millicode calls always expect their arguments in the integer argument
9270 registers, and always return their result in %r29 (ret1). They
9271 are expected to clobber their arguments, %r1, %r29, and the return
9272 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9274 This function tells reorg that the references to arguments and
9275 millicode calls do not appear to happen until after the millicode call.
9276 This allows reorg to put insns which set the argument registers into the
9277 delay slot of the millicode call -- thus they act more like traditional
9278 CALL_INSNs.
9280 Note we cannot consider side effects of the insn to be delayed because
9281 the branch and link insn will clobber the return pointer. If we happened
9282 to use the return pointer in the delay slot of the call, then we lose.
9284 get_attr_type will try to recognize the given insn, so make sure to
9285 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9286 in particular. */
9288 pa_insn_refs_are_delayed (rtx insn)
9290 return ((NONJUMP_INSN_P (insn)
9291 && GET_CODE (PATTERN (insn)) != SEQUENCE
9292 && GET_CODE (PATTERN (insn)) != USE
9293 && GET_CODE (PATTERN (insn)) != CLOBBER
9294 && get_attr_type (insn) == TYPE_MILLI));
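/* For example (illustrative), treating the argument references as
   delayed lets reorg transform

	ldi 10,%r26		; set up millicode argument
	bl $$mulI,%r31
	nop

   into

	bl $$mulI,%r31
	ldi 10,%r26		; argument now set up in the delay slot

   even though the argument setup then textually follows the call.  */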
9297 /* Promote the return value, but not the arguments. */
9299 static enum machine_mode
9300 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9301 enum machine_mode mode,
9302 int *punsignedp ATTRIBUTE_UNUSED,
9303 const_tree fntype ATTRIBUTE_UNUSED,
9304 int for_return)
9306 if (for_return == 0)
9307 return mode;
9308 return promote_mode (type, mode, punsignedp);
9311 /* On the HP-PA the value is found in register(s) 28(-29), unless
9312 the mode is SF or DF. Then the value is returned in fr4 (32).
9314 This must perform the same promotions as PROMOTE_MODE, else promoting
9315 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9317 Small structures must be returned in a PARALLEL on PA64 in order
9318 to match the HP Compiler ABI. */
9320 static rtx
9321 pa_function_value (const_tree valtype,
9322 const_tree func ATTRIBUTE_UNUSED,
9323 bool outgoing ATTRIBUTE_UNUSED)
9325 enum machine_mode valmode;
9327 if (AGGREGATE_TYPE_P (valtype)
9328 || TREE_CODE (valtype) == COMPLEX_TYPE
9329 || TREE_CODE (valtype) == VECTOR_TYPE)
9331 if (TARGET_64BIT)
9333 /* Aggregates with a size less than or equal to 128 bits are
9334 returned in GR 28(-29). They are left justified. The pad
9335 bits are undefined. Larger aggregates are returned in
9336 memory. */
9337 rtx loc[2];
9338 int i, offset = 0;
9339 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9341 for (i = 0; i < ub; i++)
9343 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9344 gen_rtx_REG (DImode, 28 + i),
9345 GEN_INT (offset));
9346 offset += 8;
9349 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9351 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9353 /* Aggregates 5 to 8 bytes in size are returned in general
9354 registers r28-r29 in the same manner as other non
9355 floating-point objects. The data is right-justified and
9356 zero-extended to 64 bits. This is opposite to the normal
9357 justification used on big endian targets and requires
9358 special treatment. */
9359 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9360 gen_rtx_REG (DImode, 28), const0_rtx);
9361 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9365 if ((INTEGRAL_TYPE_P (valtype)
9366 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9367 || POINTER_TYPE_P (valtype))
9368 valmode = word_mode;
9369 else
9370 valmode = TYPE_MODE (valtype);
9372 if (TREE_CODE (valtype) == REAL_TYPE
9373 && !AGGREGATE_TYPE_P (valtype)
9374 && TYPE_MODE (valtype) != TFmode
9375 && !TARGET_SOFT_FLOAT)
9376 return gen_rtx_REG (valmode, 32);
9378 return gen_rtx_REG (valmode, 28);
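/* For example (illustrative): on TARGET_64BIT, a 16-byte aggregate
   return value comes back left justified in GR 28 and GR 29, i.e. as

	(parallel [(expr_list (reg:DI 28) (const_int 0))
		   (expr_list (reg:DI 29) (const_int 8))])

   exactly as the loop above constructs it.  */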
9381 /* Implement the TARGET_LIBCALL_VALUE hook. */
9383 static rtx
9384 pa_libcall_value (enum machine_mode mode,
9385 const_rtx fun ATTRIBUTE_UNUSED)
9387 if (! TARGET_SOFT_FLOAT
9388 && (mode == SFmode || mode == DFmode))
9389 return gen_rtx_REG (mode, 32);
9390 else
9391 return gen_rtx_REG (mode, 28);
9394 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9396 static bool
9397 pa_function_value_regno_p (const unsigned int regno)
9399 if (regno == 28
9400 || (! TARGET_SOFT_FLOAT && regno == 32))
9401 return true;
9403 return false;
9406 /* Update the data in CUM to advance over an argument
9407 of mode MODE and data type TYPE.
9408 (TYPE is null for libcalls where that information may not be available.) */
9410 static void
9411 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9412 const_tree type, bool named ATTRIBUTE_UNUSED)
9414 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9415 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9417 cum->nargs_prototype--;
9418 cum->words += (arg_size
9419 + ((cum->words & 01)
9420 && type != NULL_TREE
9421 && arg_size > 1));
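/* For example (illustrative): a prototyped DFmode argument has
   arg_size == 2 on the 32-bit port, so if it arrives when cum->words
   is odd the expression above inserts one pad word first:

	words = 1, arg_size = 2  =>  words = 1 + 2 + 1 = 4

   keeping doubleword arguments on even word boundaries.  */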
9424 /* Return the location of a parameter that is passed in a register or NULL
9425 if the parameter has any component that is passed in memory.
9427 This is new code and will be pushed into the net sources after
9428 further testing.
9430 ??? We might want to restructure this so that it looks more like other
9431 ports. */
9432 static rtx
9433 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9434 const_tree type, bool named ATTRIBUTE_UNUSED)
9436 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9437 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9438 int alignment = 0;
9439 int arg_size;
9440 int fpr_reg_base;
9441 int gpr_reg_base;
9442 rtx retval;
9444 if (mode == VOIDmode)
9445 return NULL_RTX;
9447 arg_size = FUNCTION_ARG_SIZE (mode, type);
9449 /* If this arg would be passed partially or totally on the stack, then
9450 this routine should return zero. pa_arg_partial_bytes will
9451 handle arguments which are split between regs and stack slots if
9452 the ABI mandates split arguments. */
9453 if (!TARGET_64BIT)
9455 /* The 32-bit ABI does not split arguments. */
9456 if (cum->words + arg_size > max_arg_words)
9457 return NULL_RTX;
9459 else
9461 if (arg_size > 1)
9462 alignment = cum->words & 1;
9463 if (cum->words + alignment >= max_arg_words)
9464 return NULL_RTX;
9467 /* The 32-bit ABI and the 64-bit ABI are rather different,
9468 particularly in their handling of FP registers. We might
9469 be able to cleverly share code between them, but I'm not
9470 going to bother in the hope that splitting them up results
9471 in code that is more easily understood. */
9473 if (TARGET_64BIT)
9475 /* Advance the base registers to their current locations.
9477 Remember, gprs grow towards smaller register numbers while
9478 fprs grow to higher register numbers. Also remember that
9479 although FP regs are 32-bit addressable, we pretend that
9480 the registers are 64 bits wide. */
9481 gpr_reg_base = 26 - cum->words;
9482 fpr_reg_base = 32 + cum->words;
9484 /* Arguments wider than one word and small aggregates need special
9485 treatment. */
9486 if (arg_size > 1
9487 || mode == BLKmode
9488 || (type && (AGGREGATE_TYPE_P (type)
9489 || TREE_CODE (type) == COMPLEX_TYPE
9490 || TREE_CODE (type) == VECTOR_TYPE)))
9492 /* Double-extended precision (80-bit), quad-precision (128-bit)
9493 and aggregates including complex numbers are aligned on
9494 128-bit boundaries. The first eight 64-bit argument slots
9495 are associated one-to-one, with general registers r26
9496 through r19, and also with floating-point registers fr4
9497 through fr11. Arguments larger than one word are always
9498 passed in general registers.
9500 Using a PARALLEL with a word mode register results in left
9501 justified data on a big-endian target. */
9503 rtx loc[8];
9504 int i, offset = 0, ub = arg_size;
9506 /* Align the base register. */
9507 gpr_reg_base -= alignment;
9509 ub = MIN (ub, max_arg_words - cum->words - alignment);
9510 for (i = 0; i < ub; i++)
9512 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9513 gen_rtx_REG (DImode, gpr_reg_base),
9514 GEN_INT (offset));
9515 gpr_reg_base -= 1;
9516 offset += 8;
9519 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9522 else
9524 /* If the argument is larger than a word, then we know precisely
9525 which registers we must use. */
9526 if (arg_size > 1)
9528 if (cum->words)
9530 gpr_reg_base = 23;
9531 fpr_reg_base = 38;
9533 else
9535 gpr_reg_base = 25;
9536 fpr_reg_base = 34;
9539 /* Structures 5 to 8 bytes in size are passed in the general
9540 registers in the same manner as other non-floating-point
9541 objects. The data is right-justified and zero-extended
9542 to 64 bits. This is opposite to the normal justification
9543 used on big-endian targets and requires special treatment.
9544 We now define BLOCK_REG_PADDING to pad these objects.
9545 Aggregates, complex and vector types are passed in the same
9546 manner as structures. */
9547 if (mode == BLKmode
9548 || (type && (AGGREGATE_TYPE_P (type)
9549 || TREE_CODE (type) == COMPLEX_TYPE
9550 || TREE_CODE (type) == VECTOR_TYPE)))
9552 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9553 gen_rtx_REG (DImode, gpr_reg_base),
9554 const0_rtx);
9555 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9558 else
9560 /* We have a single word (32 bits). A simple computation
9561 will get us the register #s we need. */
9562 gpr_reg_base = 26 - cum->words;
9563 fpr_reg_base = 32 + 2 * cum->words;
9567 /* Determine if the argument needs to be passed in both general and
9568 floating point registers. */
9569 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9570 /* If we are doing soft-float with portable runtime, then there
9571 is no need to worry about FP regs. */
9572 && !TARGET_SOFT_FLOAT
9573 /* The parameter must be some kind of scalar float, else we just
9574 pass it in integer registers. */
9575 && GET_MODE_CLASS (mode) == MODE_FLOAT
9576 /* The target function must not have a prototype. */
9577 && cum->nargs_prototype <= 0
9578 /* libcalls do not need to pass items in both FP and general
9579 registers. */
9580 && type != NULL_TREE
9581 /* All this hair applies to "outgoing" args only. This includes
9582 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9583 && !cum->incoming)
9584 /* Also pass outgoing floating arguments in both registers in indirect
9585 calls with the 32-bit ABI and the HP assembler since there is no
9586 way to specify the argument locations in static functions. */
9587 || (!TARGET_64BIT
9588 && !TARGET_GAS
9589 && !cum->incoming
9590 && cum->indirect
9591 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9593 retval
9594 = gen_rtx_PARALLEL
9595 (mode,
9596 gen_rtvec (2,
9597 gen_rtx_EXPR_LIST (VOIDmode,
9598 gen_rtx_REG (mode, fpr_reg_base),
9599 const0_rtx),
9600 gen_rtx_EXPR_LIST (VOIDmode,
9601 gen_rtx_REG (mode, gpr_reg_base),
9602 const0_rtx)));
9604 else
9606 /* See if we should pass this parameter in a general register. */
9607 if (TARGET_SOFT_FLOAT
9608 /* Indirect calls in the normal 32-bit ABI require all arguments
9609 to be passed in general registers. */
9610 || (!TARGET_PORTABLE_RUNTIME
9611 && !TARGET_64BIT
9612 && !TARGET_ELF32
9613 && cum->indirect)
9614 /* If the parameter is not a scalar floating-point parameter,
9615 then it belongs in GPRs. */
9616 || GET_MODE_CLASS (mode) != MODE_FLOAT
9617 /* Structure with single SFmode field belongs in GPR. */
9618 || (type && AGGREGATE_TYPE_P (type)))
9619 retval = gen_rtx_REG (mode, gpr_reg_base);
9620 else
9621 retval = gen_rtx_REG (mode, fpr_reg_base);
9623 return retval;
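/* Worked examples of the mappings above, taken directly from the
   formulas in the code (the argument sequences are hypothetical):

   64-bit ABI: gpr_reg_base = 26 - cum->words and fpr_reg_base =
   32 + cum->words, so argument slots 0, 1, 2, ... land in r26, r25,
   r24, ... down to r19, with FP regnos 32, 33, 34, ... in step.

   32-bit ABI, single-word case: gpr_reg_base = 26 - cum->words and
   fpr_reg_base = 32 + 2 * cum->words, so slots 0..3 map to
   r26/r25/r24/r23 and to FP regnos 32/34/36/38.  */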
9626 /* Arguments larger than one word are double-word aligned. */
9628 static unsigned int
9629 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9631 bool singleword = (type
9632 ? (integer_zerop (TYPE_SIZE (type))
9633 || !TREE_CONSTANT (TYPE_SIZE (type))
9634 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9635 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9637 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
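/* Worked examples of the rule above, assuming 8-byte words: a 4-byte
   int and an 8-byte double are "singleword" and get PARM_BOUNDARY,
   while a hypothetical 12-byte struct gets MAX_PARM_BOUNDARY
   (double-word) alignment.  Zero-sized and variable-sized types
   deliberately fall into the singleword class.  */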
9640 /* If this arg would be passed totally in registers or totally on the stack,
9641 then this routine should return zero. */
9643 static int
9644 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9645 tree type, bool named ATTRIBUTE_UNUSED)
9647 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9648 unsigned int max_arg_words = 8;
9649 unsigned int offset = 0;
9651 if (!TARGET_64BIT)
9652 return 0;
9654 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9655 offset = 1;
9657 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9658 /* Arg fits fully into registers. */
9659 return 0;
9660 else if (cum->words + offset >= max_arg_words)
9661 /* Arg fully on the stack. */
9662 return 0;
9663 else
9664 /* Arg is split. */
9665 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
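/* A worked example of the three cases above, assuming TARGET_64BIT
   (max_arg_words == 8, UNITS_PER_WORD == 8) and hypothetical values:
   with cum->words == 6 and a four-word argument, 6 + 4 > 8, so the
   argument is split and (8 - 6 - 0) * 8 == 16 bytes travel in
   registers.  A minimal host-side model (illustrative only, not part
   of GCC):  */

static int
partial_bytes_model (int words, int size)
{
  const int max_arg_words = 8, units_per_word = 8;
  /* One word of padding when a multiword argument starts at an odd slot.  */
  int offset = (size > 1 && (words & 1)) ? 1 : 0;

  if (words + offset + size <= max_arg_words)
    return 0;                      /* Fits fully in registers.  */
  if (words + offset >= max_arg_words)
    return 0;                      /* Goes fully on the stack.  */
  return (max_arg_words - words - offset) * units_per_word;
}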
9669 /* A get_unnamed_section callback for switching to the text section.
9671 This function is only used with SOM. Because we don't support
9672 named subspaces, we can only create a new subspace or switch back
9673 to the default text subspace. */
9675 static void
9676 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9678 gcc_assert (TARGET_SOM);
9679 if (TARGET_GAS)
9681 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9683 /* We only want to emit a .nsubspa directive once at the
9684 start of the function. */
9685 cfun->machine->in_nsubspa = 1;
9687 /* Create a new subspace for the text. This provides
9688 better stub placement and one-only functions. */
9689 if (cfun->decl
9690 && DECL_ONE_ONLY (cfun->decl)
9691 && !DECL_WEAK (cfun->decl))
9693 output_section_asm_op ("\t.SPACE $TEXT$\n"
9694 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9695 "ACCESS=44,SORT=24,COMDAT");
9696 return;
9699 else
9701 /* Either there isn't a current function, or the body of the
9702 current function has been completed. So, we are changing to the
9703 text section to output debugging information. Thus, we
9704 need to forget that we are in the text section so that
9705 varasm.c will call us when text_section is selected again. */
9706 gcc_assert (!cfun || !cfun->machine
9707 || cfun->machine->in_nsubspa == 2);
9708 in_section = NULL;
9710 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9711 return;
9713 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9716 /* A get_unnamed_section callback for switching to comdat data
9717 sections. This function is only used with SOM. */
9719 static void
9720 som_output_comdat_data_section_asm_op (const void *data)
9722 in_section = NULL;
9723 output_section_asm_op (data);
9726 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9728 static void
9729 pa_som_asm_init_sections (void)
9731 text_section
9732 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9734 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9735 is not being generated. */
9736 som_readonly_data_section
9737 = get_unnamed_section (0, output_section_asm_op,
9738 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9740 /* When secondary definitions are not supported, SOM makes readonly
9741 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9742 the comdat flag. */
9743 som_one_only_readonly_data_section
9744 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9745 "\t.SPACE $TEXT$\n"
9746 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9747 "ACCESS=0x2c,SORT=16,COMDAT");
9750 /* When secondary definitions are not supported, SOM makes data one-only
9751 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9752 som_one_only_data_section
9753 = get_unnamed_section (SECTION_WRITE,
9754 som_output_comdat_data_section_asm_op,
9755 "\t.SPACE $PRIVATE$\n"
9756 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9757 "ACCESS=31,SORT=24,COMDAT");
9759 if (flag_tm)
9760 som_tm_clone_table_section
9761 = get_unnamed_section (0, output_section_asm_op,
9762 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9764 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9765 which reference data within the $TEXT$ space (for example constant
9766 strings in the $LIT$ subspace).
9768 The assemblers (GAS and HP as) both have problems with handling
9769 the difference of two symbols, which is the other correct way to
9770 reference constant data during PIC code generation.
9772 So, there's no way to reference constant data which is in the
9773 $TEXT$ space during PIC generation. Instead, place all constant
9774 data into the $PRIVATE$ subspace (this reduces sharing, but it
9775 works correctly). */
9776 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9778 /* We must not have a reference to an external symbol defined in a
9779 shared library in a readonly section, else the SOM linker will
9780 complain.
9782 So, we force exception information into the data section. */
9783 exception_section = data_section;
9786 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9788 static section *
9789 pa_som_tm_clone_table_section (void)
9791 return som_tm_clone_table_section;
9794 /* On hpux10, the linker will give an error if we have a reference
9795 in the read-only data section to a symbol defined in a shared
9796 library. Therefore, expressions that might require a reloc
9797 cannot be placed in the read-only data section. */
9799 static section *
9800 pa_select_section (tree exp, int reloc,
9801 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9803 if (TREE_CODE (exp) == VAR_DECL
9804 && TREE_READONLY (exp)
9805 && !TREE_THIS_VOLATILE (exp)
9806 && DECL_INITIAL (exp)
9807 && (DECL_INITIAL (exp) == error_mark_node
9808 || TREE_CONSTANT (DECL_INITIAL (exp)))
9809 && !reloc)
9811 if (TARGET_SOM
9812 && DECL_ONE_ONLY (exp)
9813 && !DECL_WEAK (exp))
9814 return som_one_only_readonly_data_section;
9815 else
9816 return readonly_data_section;
9818 else if (CONSTANT_CLASS_P (exp) && !reloc)
9819 return readonly_data_section;
9820 else if (TARGET_SOM
9821 && TREE_CODE (exp) == VAR_DECL
9822 && DECL_ONE_ONLY (exp)
9823 && !DECL_WEAK (exp))
9824 return som_one_only_data_section;
9825 else
9826 return data_section;
9829 static void
9830 pa_globalize_label (FILE *stream, const char *name)
9832 /* We only handle DATA objects here; functions are globalized in
9833 ASM_DECLARE_FUNCTION_NAME. */
9834 if (! FUNCTION_NAME_P (name))
9836 fputs ("\t.EXPORT ", stream);
9837 assemble_name (stream, name);
9838 fputs (",DATA\n", stream);
9842 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9844 static rtx
9845 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9846 int incoming ATTRIBUTE_UNUSED)
9848 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9851 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9853 bool
9854 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9856 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9857 PA64 ABI says that objects larger than 128 bits are returned in memory.
9858 Note, int_size_in_bytes can return -1 if the size of the object is
9859 variable or larger than the maximum value that can be expressed as
9860 a HOST_WIDE_INT. It can also return zero for an empty type. The
9861 simplest way to handle variable and empty types is to pass them in
9862 memory. This avoids problems in defining the boundaries of argument
9863 slots, allocating registers, etc. */
9864 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9865 || int_size_in_bytes (type) <= 0);
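/* Worked examples of the size test above: on the 32-bit (SOM) target
   an 8-byte struct comes back in registers while a 12-byte struct
   goes to memory; on the 64-bit target the register limit is 16
   bytes.  A variable-sized type (int_size_in_bytes == -1) and an
   empty struct (size 0) both take the memory path.  */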
9868 /* Structure to hold declaration and name of external symbols that are
9869 emitted by GCC. We generate a vector of these symbols and output them
9870 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9871 This avoids putting out names that are never really used. */
9873 typedef struct GTY(()) extern_symbol
9875 tree decl;
9876 const char *name;
9877 } extern_symbol;
9879 /* Gc'd vector of the extern_symbol structures defined above. */
9882 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9884 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9885 /* Mark DECL (name NAME) as an external reference (assembler output
9886 file FILE). This saves the names to output at the end of the file
9887 if actually referenced. */
9889 void
9890 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9892 gcc_assert (file == asm_out_file);
9893 extern_symbol p = {decl, name};
9894 vec_safe_push (extern_symbols, p);
9897 /* Output text required at the end of an assembler file.
9898 This includes deferred plabels and .import directives for
9899 all external symbols that were actually referenced. */
9901 static void
9902 pa_hpux_file_end (void)
9904 unsigned int i;
9905 extern_symbol *p;
9907 if (!NO_DEFERRED_PROFILE_COUNTERS)
9908 output_deferred_profile_counters ();
9910 output_deferred_plabels ();
9912 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9914 tree decl = p->decl;
9916 if (!TREE_ASM_WRITTEN (decl)
9917 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9918 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9921 vec_free (extern_symbols);
9923 #endif
9925 /* Return true if a change from mode FROM to mode TO for a register
9926 in register class RCLASS is invalid. */
9928 bool
9929 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9930 enum reg_class rclass)
9932 if (from == to)
9933 return false;
9935 /* Reject changes to/from complex and vector modes. */
9936 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9937 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9938 return true;
9940 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9941 return false;
9943 /* There is no way to load QImode or HImode values directly from
9944 memory. SImode loads to the FP registers are not zero extended.
9945 On the 64-bit target, this conflicts with the definition of
9946 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9947 with different sizes in the floating-point registers. */
9948 if (MAYBE_FP_REG_CLASS_P (rclass))
9949 return true;
9951 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9952 in specific sets of registers. Thus, we cannot allow changing
9953 to a larger mode when it's larger than a word. */
9954 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9955 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9956 return true;
9958 return false;
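/* Examples of the rules above: SImode <-> SFmode (both 4 bytes) is
   allowed in any class; SFmode -> DFmode is rejected for any class
   that may contain FP registers; and on the 32-bit target
   SImode -> DImode is rejected for every class, since DImode is both
   wider than a word and wider than SImode.  */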
9961 /* Returns TRUE if it is a good idea to tie two pseudo registers
9962 when one has mode MODE1 and one has mode MODE2.
9963 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9964 for any hard reg, then this must be FALSE for correct output.
9966 We should return FALSE for QImode and HImode because these modes
9967 are not OK in the floating-point registers. However, this prevents
9968 tying these modes to SImode and DImode in the general registers.
9969 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9970 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9971 in the floating-point registers. */
9973 bool
9974 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9976 /* Don't tie modes in different classes. */
9977 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9978 return false;
9980 return true;
9984 /* Length in units of the trampoline instruction code. */
9986 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9989 /* Output assembler code for a block containing the constant parts
9990 of a trampoline, leaving space for the variable parts.
9992 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9993 and then branches to the specified routine.
9995 This code template is copied from the text segment to a stack
9996 location, patched by pa_trampoline_init to contain valid values,
9997 and then entered as a subroutine.
9999 It is best to keep this as small as possible to avoid having to
10000 flush multiple lines in the cache. */
10002 static void
10003 pa_asm_trampoline_template (FILE *f)
10005 if (!TARGET_64BIT)
10007 fputs ("\tldw 36(%r22),%r21\n", f);
10008 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10009 if (ASSEMBLER_DIALECT == 0)
10010 fputs ("\tdepi 0,31,2,%r21\n", f);
10011 else
10012 fputs ("\tdepwi 0,31,2,%r21\n", f);
10013 fputs ("\tldw 4(%r21),%r19\n", f);
10014 fputs ("\tldw 0(%r21),%r21\n", f);
10015 if (TARGET_PA_20)
10017 fputs ("\tbve (%r21)\n", f);
10018 fputs ("\tldw 40(%r22),%r29\n", f);
10019 fputs ("\t.word 0\n", f);
10020 fputs ("\t.word 0\n", f);
10022 else
10024 fputs ("\tldsid (%r21),%r1\n", f);
10025 fputs ("\tmtsp %r1,%sr0\n", f);
10026 fputs ("\tbe 0(%sr0,%r21)\n", f);
10027 fputs ("\tldw 40(%r22),%r29\n", f);
10029 fputs ("\t.word 0\n", f);
10030 fputs ("\t.word 0\n", f);
10031 fputs ("\t.word 0\n", f);
10032 fputs ("\t.word 0\n", f);
10034 else
10036 fputs ("\t.dword 0\n", f);
10037 fputs ("\t.dword 0\n", f);
10038 fputs ("\t.dword 0\n", f);
10039 fputs ("\t.dword 0\n", f);
10040 fputs ("\tmfia %r31\n", f);
10041 fputs ("\tldd 24(%r31),%r1\n", f);
10042 fputs ("\tldd 24(%r1),%r27\n", f);
10043 fputs ("\tldd 16(%r1),%r1\n", f);
10044 fputs ("\tbve (%r1)\n", f);
10045 fputs ("\tldd 32(%r31),%r31\n", f);
10046 fputs ("\t.dword 0 ; fptr\n", f);
10047 fputs ("\t.dword 0 ; static link\n", f);
10051 /* Emit RTL insns to initialize the variable parts of a trampoline.
10052 FNADDR is an RTX for the address of the function's pure code.
10053 CXT is an RTX for the static chain value for the function.
10055 Move the function address to the trampoline template at offset 36.
10056 Move the static chain value to the trampoline template at offset 40.
10057 Move the trampoline address to the trampoline template at offset 44.
10058 Move r19 to trampoline template at offset 48. The latter two
10059 words create a plabel for the indirect call to the trampoline.
10061 A similar sequence is used for the 64-bit port but the plabel is
10062 at the beginning of the trampoline.
10064 Finally, the cache entries for the trampoline code are flushed.
10065 This is necessary to ensure that the trampoline instruction sequence
10066 is written to memory prior to any attempts at prefetching the code
10067 sequence. */
10069 static void
10070 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10072 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10073 rtx start_addr = gen_reg_rtx (Pmode);
10074 rtx end_addr = gen_reg_rtx (Pmode);
10075 rtx line_length = gen_reg_rtx (Pmode);
10076 rtx r_tramp, tmp;
10078 emit_block_move (m_tramp, assemble_trampoline_template (),
10079 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10080 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10082 if (!TARGET_64BIT)
10084 tmp = adjust_address (m_tramp, Pmode, 36);
10085 emit_move_insn (tmp, fnaddr);
10086 tmp = adjust_address (m_tramp, Pmode, 40);
10087 emit_move_insn (tmp, chain_value);
10089 /* Create a fat pointer for the trampoline. */
10090 tmp = adjust_address (m_tramp, Pmode, 44);
10091 emit_move_insn (tmp, r_tramp);
10092 tmp = adjust_address (m_tramp, Pmode, 48);
10093 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10095 /* fdc and fic only use registers for the address to flush;
10096 they do not accept integer displacements. We align the
10097 start and end addresses to the beginning of their respective
10098 cache lines to minimize the number of lines flushed. */
10099 emit_insn (gen_andsi3 (start_addr, r_tramp,
10100 GEN_INT (-MIN_CACHELINE_SIZE)));
10101 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10102 TRAMPOLINE_CODE_SIZE-1));
10103 emit_insn (gen_andsi3 (end_addr, tmp,
10104 GEN_INT (-MIN_CACHELINE_SIZE)));
10105 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10106 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10107 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10108 gen_reg_rtx (Pmode),
10109 gen_reg_rtx (Pmode)));
10111 else
10113 tmp = adjust_address (m_tramp, Pmode, 56);
10114 emit_move_insn (tmp, fnaddr);
10115 tmp = adjust_address (m_tramp, Pmode, 64);
10116 emit_move_insn (tmp, chain_value);
10118 /* Create a fat pointer for the trampoline. */
10119 tmp = adjust_address (m_tramp, Pmode, 16);
10120 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10121 r_tramp, 32)));
10122 tmp = adjust_address (m_tramp, Pmode, 24);
10123 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10125 /* fdc and fic only use registers for the address to flush;
10126 they do not accept integer displacements. We align the
10127 start and end addresses to the beginning of their respective
10128 cache lines to minimize the number of lines flushed. */
10129 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10130 emit_insn (gen_anddi3 (start_addr, tmp,
10131 GEN_INT (-MIN_CACHELINE_SIZE)));
10132 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10133 TRAMPOLINE_CODE_SIZE - 1));
10134 emit_insn (gen_anddi3 (end_addr, tmp,
10135 GEN_INT (-MIN_CACHELINE_SIZE)));
10136 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10137 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10138 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10139 gen_reg_rtx (Pmode),
10140 gen_reg_rtx (Pmode)));
10143 #ifdef HAVE_ENABLE_EXECUTE_STACK
10144 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10145 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10146 #endif
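/* A worked sketch of the cache-line masking above, assuming a
   hypothetical MIN_CACHELINE_SIZE of 32: ANDing with -32 rounds an
   address down to the start of its line, so a trampoline at 0x7b24
   with 40 bytes of code gives

       start_addr = 0x7b24 & -32            = 0x7b20
       end_addr   = (0x7b24 + 40 - 1) & -32 = 0x7b40

   and the flush loops walk the two lines 0x7b20 and 0x7b40.  A
   minimal host-side model (illustrative only, not part of GCC):  */

static unsigned long
line_align_down (unsigned long addr, unsigned long line_size)
{
  /* Same as addr & -line_size when line_size is a power of two.  */
  return addr & ~(line_size - 1);
}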
10149 /* Perform any machine-specific adjustment in the address of the trampoline.
10150 ADDR contains the address that was passed to pa_trampoline_init.
10151 Adjust the trampoline address to point to the plabel at offset 44. */
10153 static rtx
10154 pa_trampoline_adjust_address (rtx addr)
10156 if (!TARGET_64BIT)
10157 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10158 return addr;
10161 static rtx
10162 pa_delegitimize_address (rtx orig_x)
10164 rtx x = delegitimize_mem_from_attrs (orig_x);
10166 if (GET_CODE (x) == LO_SUM
10167 && GET_CODE (XEXP (x, 1)) == UNSPEC
10168 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10169 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10170 return x;
10173 static rtx
10174 pa_internal_arg_pointer (void)
10176 /* The argument pointer and the hard frame pointer are the same in
10177 the 32-bit runtime, so we don't need a copy. */
10178 if (TARGET_64BIT)
10179 return copy_to_reg (virtual_incoming_args_rtx);
10180 else
10181 return virtual_incoming_args_rtx;
10184 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10185 Frame pointer elimination is automatically handled. */
10187 static bool
10188 pa_can_eliminate (const int from, const int to)
10190 /* The argument cannot be eliminated in the 64-bit runtime. */
10191 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10192 return false;
10194 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10195 ? ! frame_pointer_needed
10196 : true);
10199 /* Define the offset between two registers, FROM to be eliminated and its
10200 replacement TO, at the start of a routine. */
10201 HOST_WIDE_INT
10202 pa_initial_elimination_offset (int from, int to)
10204 HOST_WIDE_INT offset;
10206 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10207 && to == STACK_POINTER_REGNUM)
10208 offset = -pa_compute_frame_size (get_frame_size (), 0);
10209 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10210 offset = 0;
10211 else
10212 gcc_unreachable ();
10214 return offset;
10217 static void
10218 pa_conditional_register_usage (void)
10220 int i;
10222 if (!TARGET_64BIT && !TARGET_PA_11)
10224 for (i = 56; i <= FP_REG_LAST; i++)
10225 fixed_regs[i] = call_used_regs[i] = 1;
10226 for (i = 33; i < 56; i += 2)
10227 fixed_regs[i] = call_used_regs[i] = 1;
10229 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10231 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10232 fixed_regs[i] = call_used_regs[i] = 1;
10234 if (flag_pic)
10235 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10238 /* Target hook for c_mode_for_suffix. */
10240 static enum machine_mode
10241 pa_c_mode_for_suffix (char suffix)
10243 if (HPUX_LONG_DOUBLE_LIBRARY)
10245 if (suffix == 'q')
10246 return TFmode;
10249 return VOIDmode;
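/* E.g., when HPUX_LONG_DOUBLE_LIBRARY is true, a constant written as
   1.0q in C source gets TFmode (128-bit) from this hook; on other
   configurations the 'q' suffix is rejected.  */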
10252 /* Target hook for function_section. */
10254 static section *
10255 pa_function_section (tree decl, enum node_frequency freq,
10256 bool startup, bool exit)
10258 /* Put functions in the text section if the target doesn't have named sections. */
10259 if (!targetm_common.have_named_sections)
10260 return text_section;
10262 /* Force nested functions into the same section as the containing
10263 function. */
10264 if (decl
10265 && DECL_SECTION_NAME (decl) == NULL
10266 && DECL_CONTEXT (decl) != NULL_TREE
10267 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10268 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10269 return function_section (DECL_CONTEXT (decl));
10271 /* Otherwise, use the default function section. */
10272 return default_function_section (decl, freq, startup, exit);
10275 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10277 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10278 that need more than three instructions to load prior to reload. This
10279 limit is somewhat arbitrary. It takes three instructions to load a
10280 CONST_INT from memory, but two of them are memory accesses. It may be better
10281 to increase the allowed range for CONST_INTS. We may also be able
10282 to handle CONST_DOUBLES. */
10284 static bool
10285 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10287 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10288 return false;
10290 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10291 return false;
10293 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10294 legitimate constants. The other variants can't be handled by
10295 the move patterns after reload starts. */
10296 if (pa_tls_referenced_p (x))
10297 return false;
10299 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10300 return false;
10302 if (TARGET_64BIT
10303 && HOST_BITS_PER_WIDE_INT > 32
10304 && GET_CODE (x) == CONST_INT
10305 && !reload_in_progress
10306 && !reload_completed
10307 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10308 && !pa_cint_ok_for_move (INTVAL (x)))
10309 return false;
10311 if (function_label_operand (x, mode))
10312 return false;
10314 return true;
10317 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10319 static unsigned int
10320 pa_section_type_flags (tree decl, const char *name, int reloc)
10322 unsigned int flags;
10324 flags = default_section_type_flags (decl, name, reloc);
10326 /* Function labels are placed in the constant pool. This can
10327 cause a section conflict if decls are put in ".data.rel.ro"
10328 or ".data.rel.ro.local" using the __attribute__ construct. */
10329 if (strcmp (name, ".data.rel.ro") == 0
10330 || strcmp (name, ".data.rel.ro.local") == 0)
10331 flags |= SECTION_WRITE | SECTION_RELRO;
10333 return flags;
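/* E.g., a hypothetical declaration such as

     const char *p __attribute__ ((section (".data.rel.ro"))) = "x";

   would land in the same section as constant-pool function labels;
   forcing SECTION_WRITE | SECTION_RELRO here keeps the two section
   declarations from conflicting.  */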
10336 /* pa_legitimate_address_p recognizes an RTL expression that is a
10337 valid memory address for an instruction. The MODE argument is the
10338 machine mode for the MEM expression that wants to use this address.
10340 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10341 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10342 available with floating-point loads and stores, and integer loads.
10343 We get better code by allowing indexed addresses in the initial
10344 RTL generation.
10346 The acceptance of indexed addresses as legitimate implies that we
10347 must provide patterns for doing indexed integer stores, or the move
10348 expanders must force the address of an indexed store to a register.
10349 We have adopted the latter approach.
10351 Another function of pa_legitimate_address_p is to ensure that
10352 the base register is a valid pointer for indexed instructions.
10353 On targets that have non-equivalent space registers, we have to
10354 know at the time of assembler output which register in a REG+REG
10355 pair is the base register. The REG_POINTER flag is sometimes lost
10356 in reload and the following passes, so it can't be relied on during
10357 code generation. Thus, we either have to canonicalize the order
10358 of the registers in REG+REG indexed addresses, or treat REG+REG
10359 addresses separately and provide patterns for both permutations.
10361 The latter approach requires several hundred additional lines of
10362 code in pa.md. The downside to canonicalizing is that a PLUS
10363 in the wrong order can't combine to form a scaled indexed
10364 memory operand. As we won't need to canonicalize the operands if
10365 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10367 We initially break out scaled indexed addresses in canonical order
10368 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10369 scaled indexed addresses during RTL generation. However, fold_rtx
10370 has its own opinion on how the operands of a PLUS should be ordered.
10371 If one of the operands is equivalent to a constant, it will make
10372 that operand the second operand. As the base register is likely to
10373 be equivalent to a SYMBOL_REF, we have made it the second operand.
10375 pa_legitimate_address_p accepts REG+REG as legitimate when the
10376 operands are in the order INDEX+BASE on targets with non-equivalent
10377 space registers, and in any order on targets with equivalent space
10378 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10380 We treat a SYMBOL_REF as legitimate if it is part of the current
10381 function's constant pool, because such addresses can actually be
10382 output as REG+SMALLINT. */
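/* Illustrative RTL sketches of the accepted forms (the registers are
   hypothetical):

     (mem (plus (reg %r3) (const_int 12)))                  REG+SMALLINT
     (mem (plus (reg %r1) (reg %r3)))                       REG+REG
     (mem (plus (mult (reg %r1) (const_int 4)) (reg %r3)))  scaled index

   In the last two, %r3 plays the base-register role in the second
   operand, matching the canonical INDEX+BASE order described above.  */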
10384 static bool
10385 pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
10387 if ((REG_P (x)
10388 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10389 : REG_OK_FOR_BASE_P (x)))
10390 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10391 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10392 && REG_P (XEXP (x, 0))
10393 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10394 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10395 return true;
10397 if (GET_CODE (x) == PLUS)
10399 rtx base, index;
10401 /* For REG+REG, the base register should be in XEXP (x, 1),
10402 so check it first. */
10403 if (REG_P (XEXP (x, 1))
10404 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10405 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10406 base = XEXP (x, 1), index = XEXP (x, 0);
10407 else if (REG_P (XEXP (x, 0))
10408 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10409 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10410 base = XEXP (x, 0), index = XEXP (x, 1);
10411 else
10412 return false;
10414 if (GET_CODE (index) == CONST_INT)
10416 if (INT_5_BITS (index))
10417 return true;
10419 /* When INT14_OK_STRICT is false, a secondary reload is needed
10420 to adjust the displacement of SImode and DImode floating point
10421 instructions but this may fail when the register also needs
10422 reloading. So, we return false when STRICT is true. We
10423 also reject long displacements for float mode addresses since
10424 the majority of accesses will use floating point instructions
10425 that don't support 14-bit offsets. */
10426 if (!INT14_OK_STRICT
10427 && (strict || !(reload_in_progress || reload_completed))
10428 && mode != QImode
10429 && mode != HImode)
10430 return false;
10432 return base14_operand (index, mode);
10435 if (!TARGET_DISABLE_INDEXING
10436 /* Only accept the "canonical" INDEX+BASE operand order
10437 on targets with non-equivalent space registers. */
10438 && (TARGET_NO_SPACE_REGS
10439 ? REG_P (index)
10440 : (base == XEXP (x, 1) && REG_P (index)
10441 && (reload_completed
10442 || (reload_in_progress && HARD_REGISTER_P (base))
10443 || REG_POINTER (base))
10444 && (reload_completed
10445 || (reload_in_progress && HARD_REGISTER_P (index))
10446 || !REG_POINTER (index))))
10447 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10448 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10449 : REG_OK_FOR_INDEX_P (index))
10450 && borx_reg_operand (base, Pmode)
10451 && borx_reg_operand (index, Pmode))
10452 return true;
10454 if (!TARGET_DISABLE_INDEXING
10455 && GET_CODE (index) == MULT
10456 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10457 && REG_P (XEXP (index, 0))
10458 && GET_MODE (XEXP (index, 0)) == Pmode
10459 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10460 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10461 && GET_CODE (XEXP (index, 1)) == CONST_INT
10462 && INTVAL (XEXP (index, 1))
10463 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10464 && borx_reg_operand (base, Pmode))
10465 return true;
10467 return false;
10470 if (GET_CODE (x) == LO_SUM)
10472 rtx y = XEXP (x, 0);
10474 if (GET_CODE (y) == SUBREG)
10475 y = SUBREG_REG (y);
10477 if (REG_P (y)
10478 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10479 : REG_OK_FOR_BASE_P (y)))
10481 /* Needed for -fPIC */
10482 if (mode == Pmode
10483 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10484 return true;
10486 if (!INT14_OK_STRICT
10487 && (strict || !(reload_in_progress || reload_completed))
10488 && mode != QImode
10489 && mode != HImode)
10490 return false;
10492 if (CONSTANT_P (XEXP (x, 1)))
10493 return true;
10495 return false;
10498 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10499 return true;
10501 return false;
10504 /* Look for machine dependent ways to make the invalid address AD a
10505 valid address.
10507 For the PA, transform:
10509 memory(X + <large int>)
10511 into:
10513 if (<large int> & mask) >= (mask + 1) / 2
10514 Y = (<large int> & ~mask) + mask + 1 Round up.
10515 else
10516 Y = (<large int> & ~mask) Round down.
10517 Z = X + Y
10518 memory (Z + (<large int> - Y));
10520 This makes reload inheritance and reload_cse work better since Z
10521 can be reused.
10523 There may be more opportunities to improve code with this hook. */
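/* A worked sketch with hypothetical numbers, for the floating-point
   case where mask == 0x1f: given memory (X + 0x7b), offset & mask is
   0x1b >= 0x10, so we round up:

       Y = (0x7b & ~0x1f) + 0x1f + 1 = 0x80
       Z = X + 0x80

   and the access becomes memory (Z + (0x7b - 0x80)) = memory (Z + -5),
   whose displacement fits the 5-bit field of the FP loads and stores,
   while Z itself can be inherited by later reloads of nearby
   addresses.  */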
10525 rtx
10526 pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
10527 int opnum, int type,
10528 int ind_levels ATTRIBUTE_UNUSED)
10530 long offset, newoffset, mask;
10531 rtx new_rtx, temp = NULL_RTX;
10533 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10534 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10536 if (optimize && GET_CODE (ad) == PLUS)
10537 temp = simplify_binary_operation (PLUS, Pmode,
10538 XEXP (ad, 0), XEXP (ad, 1));
10540 new_rtx = temp ? temp : ad;
10542 if (optimize
10543 && GET_CODE (new_rtx) == PLUS
10544 && GET_CODE (XEXP (new_rtx, 0)) == REG
10545 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10547 offset = INTVAL (XEXP ((new_rtx), 1));
10549 /* Choose rounding direction. Round up if we are >= halfway. */
10550 if ((offset & mask) >= ((mask + 1) / 2))
10551 newoffset = (offset & ~mask) + mask + 1;
10552 else
10553 newoffset = offset & ~mask;
10555 /* Ensure that long displacements are aligned. */
10556 if (mask == 0x3fff
10557 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10558 || (TARGET_64BIT && (mode) == DImode)))
10559 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10561 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10563 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10564 GEN_INT (newoffset));
10565 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10566 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10567 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10568 opnum, (enum reload_type) type);
10569 return ad;
10573 return NULL_RTX;
10576 #include "gt-pa.h"