/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"
#include "builtins.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
			    const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
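/* Usage note (editorial, not in the upstream source): a typical
   invocation is -mfixed-range=fr4-fr31, matching the kernel-mode
   example in the comment above.  If the requested range covers every
   FP register the compiler knows about, the loop above runs off the
   end of the FP registers and MASK_DISABLE_FPREGS is set.  */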
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
	  && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}
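/* Editorial note: VAL_14_BITS_P accepts the 14-bit signed immediates
   reachable with a single ldo (-8192 .. 8191); anything larger must
   match the ldil or zdepi tests below.  */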
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
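/* Worked example (editorial): 0x12345800 passes -- its low 11 bits are
   zero and bit 31 is clear, so the masked value x above is 0.
   0x12345801 fails because bit 0 survives the mask.  */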
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
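/* Worked example (editorial): x = 0x3e0, a run of five 1s starting at
   bit 5, is accepted: lsb_mask = 0x20 and
   t = ((0x3e0 >> 4) + 0x20) & ~0x1f = 0x40, a power of two.  This is
   the pattern zdepi produces by depositing the 5-bit value -1 there.  */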
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
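/* Worked example (editorial): mask = 0xff (pattern 0...01...1) is
   accepted: ~mask is a solid run of high 1s, so adding its lowest set
   bit carries all the way out and leaves 0.  mask = 0xf0f is rejected
   because ~mask has two separate runs of 1s, so a nonzero,
   non-power-of-two value remains.  */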
/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
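/* Worked example (editorial, not in the upstream source): for a
   MODE_INT reference the mask is 0x3fff, so an offset of 0x6000 rounds
   up to Y = 0x8000 because (0x6000 & 0x3fff) = 0x2000 is exactly
   halfway to the next boundary.  We then compute Z = X + 0x8000 and
   reference memory (Z + (-0x2000)); the -0x2000 residual fits in a
   14-bit displacement field.  */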
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
	     (plus (mult (reg) (shadd_const))
		   (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
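/* Cost example (editorial): on a 32-bit target a DImode multiply gets
   factor = 8 / 4 = 2 above, so it is costed at 2 * 2 = 4 times the
   SImode multiply cost (COSTS_N_INSNS (8) when FP-assisted multiplies
   are available, COSTS_N_INSNS (20) otherwise).  */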
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}
1553 /* Emit insns to move operands[1] into operands[0].
1555 Return 1 if we have written out everything that needs to be done to
1556 do the move. Otherwise, return 0 and the caller will emit the move
1557 normally.
1559 Note SCRATCH_REG may not be in the proper mode depending on how it
1560 will be used. This routine is responsible for creating a new copy
1561 of SCRATCH_REG in the proper mode. */
1564 pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1566 register rtx operand0 = operands[0];
1567 register rtx operand1 = operands[1];
1568 register rtx tem;
1570 /* We can only handle indexed addresses in the destination operand
1571 of floating point stores. Thus, we need to break out indexed
1572 addresses from the destination operand. */
1573 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1575 gcc_assert (can_create_pseudo_p ());
1577 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1578 operand0 = replace_equiv_address (operand0, tem);
1581 /* On targets with non-equivalent space registers, break out unscaled
1582 indexed addresses from the source operand before the final CSE.
1583 We have to do this because the REG_POINTER flag is not correctly
1584 carried through various optimization passes and CSE may substitute
1585 a pseudo without the pointer set for one with the pointer set. As
1586 a result, we loose various opportunities to create insns with
1587 unscaled indexed addresses. */
1588 if (!TARGET_NO_SPACE_REGS
1589 && !cse_not_expected
1590 && GET_CODE (operand1) == MEM
1591 && GET_CODE (XEXP (operand1, 0)) == PLUS
1592 && REG_P (XEXP (XEXP (operand1, 0), 0))
1593 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1594 operand1
1595 = replace_equiv_address (operand1,
1596 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1598 if (scratch_reg
1599 && reload_in_progress && GET_CODE (operand0) == REG
1600 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1601 operand0 = reg_equiv_mem (REGNO (operand0));
1602 else if (scratch_reg
1603 && reload_in_progress && GET_CODE (operand0) == SUBREG
1604 && GET_CODE (SUBREG_REG (operand0)) == REG
1605 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1607 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1608 the code which tracks sets/uses for delete_output_reload. */
1609 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1610 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1611 SUBREG_BYTE (operand0));
1612 operand0 = alter_subreg (&temp, true);
1615 if (scratch_reg
1616 && reload_in_progress && GET_CODE (operand1) == REG
1617 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1618 operand1 = reg_equiv_mem (REGNO (operand1));
1619 else if (scratch_reg
1620 && reload_in_progress && GET_CODE (operand1) == SUBREG
1621 && GET_CODE (SUBREG_REG (operand1)) == REG
1622 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1624 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1625 the code which tracks sets/uses for delete_output_reload. */
1626 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1627 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1628 SUBREG_BYTE (operand1));
1629 operand1 = alter_subreg (&temp, true);
1632 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1633 && ((tem = find_replacement (&XEXP (operand0, 0)))
1634 != XEXP (operand0, 0)))
1635 operand0 = replace_equiv_address (operand0, tem);
1637 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1638 && ((tem = find_replacement (&XEXP (operand1, 0)))
1639 != XEXP (operand1, 0)))
1640 operand1 = replace_equiv_address (operand1, tem);
1642 /* Handle secondary reloads for loads/stores of FP registers from
1643 REG+D addresses where D does not fit in 5 or 14 bits, including
1644 (subreg (mem (addr))) cases. */
1645 if (scratch_reg
1646 && fp_reg_operand (operand0, mode)
1647 && (MEM_P (operand1)
1648 || (GET_CODE (operand1) == SUBREG
1649 && MEM_P (XEXP (operand1, 0))))
1650 && !floating_point_store_memory_operand (operand1, mode))
1652 if (GET_CODE (operand1) == SUBREG)
1653 operand1 = XEXP (operand1, 0);
1655 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1656 it in WORD_MODE regardless of what mode it was originally given
1657 to us. */
1658 scratch_reg = force_mode (word_mode, scratch_reg);
1660 /* D might not fit in 14 bits either; for such cases load D into
1661 scratch reg. */
1662 if (reg_plus_base_memory_operand (operand1, mode)
1663 && !(TARGET_PA_20
1664 && !TARGET_ELF32
1665 && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
1667 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1668 emit_move_insn (scratch_reg,
1669 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1670 Pmode,
1671 XEXP (XEXP (operand1, 0), 0),
1672 scratch_reg));
1674 else
1675 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1676 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1677 replace_equiv_address (operand1, scratch_reg)));
1678 return 1;
1680 else if (scratch_reg
1681 && fp_reg_operand (operand1, mode)
1682 && (MEM_P (operand0)
1683 || (GET_CODE (operand0) == SUBREG
1684 && MEM_P (XEXP (operand0, 0))))
1685 && !floating_point_store_memory_operand (operand0, mode))
1687 if (GET_CODE (operand0) == SUBREG)
1688 operand0 = XEXP (operand0, 0);
1690 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1691 it in WORD_MODE regardless of what mode it was originally given
1692 to us. */
1693 scratch_reg = force_mode (word_mode, scratch_reg);
1695 /* D might not fit in 14 bits either; for such cases load D into
1696 scratch reg. */
1697 if (reg_plus_base_memory_operand (operand0, mode)
1698 && !(TARGET_PA_20
1699 && !TARGET_ELF32
1700 && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
1702 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1703 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1704 0)),
1705 Pmode,
1706 XEXP (XEXP (operand0, 0),
1708 scratch_reg));
1710 else
1711 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1712 emit_insn (gen_rtx_SET (VOIDmode,
1713 replace_equiv_address (operand0, scratch_reg),
1714 operand1));
1715 return 1;
1717 /* Handle secondary reloads for loads of FP registers from constant
1718 expressions by forcing the constant into memory. For the most part,
1719 this is only necessary for SImode and DImode.
1721 Use scratch_reg to hold the address of the memory location. */
1722 else if (scratch_reg
1723 && CONSTANT_P (operand1)
1724 && fp_reg_operand (operand0, mode))
1726 rtx const_mem, xoperands[2];
1728 if (operand1 == CONST0_RTX (mode))
1730 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1731 return 1;
1734 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1735 it in WORD_MODE regardless of what mode it was originally given
1736 to us. */
1737 scratch_reg = force_mode (word_mode, scratch_reg);
1739 /* Force the constant into memory and put the address of the
1740 memory location into scratch_reg. */
1741 const_mem = force_const_mem (mode, operand1);
1742 xoperands[0] = scratch_reg;
1743 xoperands[1] = XEXP (const_mem, 0);
1744 pa_emit_move_sequence (xoperands, Pmode, 0);
1746 /* Now load the destination register. */
1747 emit_insn (gen_rtx_SET (mode, operand0,
1748 replace_equiv_address (const_mem, scratch_reg)));
1749 return 1;
1751 /* Handle secondary reloads for SAR. These occur when trying to load
1752 the SAR from memory or a constant. */
1753 else if (scratch_reg
1754 && GET_CODE (operand0) == REG
1755 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1756 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1757 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1759 /* D might not fit in 14 bits either; for such cases load D into
1760 scratch reg. */
1761 if (GET_CODE (operand1) == MEM
1762 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1764 /* We are reloading the address into the scratch register, so we
1765 want to make sure the scratch register is a full register. */
1766 scratch_reg = force_mode (word_mode, scratch_reg);
1768 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1769 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1770 0)),
1771 Pmode,
1772 XEXP (XEXP (operand1, 0),
1774 scratch_reg));
1776 /* Now we are going to load the scratch register from memory,
1777 we want to load it in the same width as the original MEM,
1778 which must be the same as the width of the ultimate destination,
1779 OPERAND0. */
1780 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1782 emit_move_insn (scratch_reg,
1783 replace_equiv_address (operand1, scratch_reg));
1785 else
1787 /* We want to load the scratch register using the same mode as
1788 the ultimate destination. */
1789 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1791 emit_move_insn (scratch_reg, operand1);
1794 /* And emit the insn to set the ultimate destination. We know that
1795 the scratch register has the same mode as the destination at this
1796 point. */
1797 emit_move_insn (operand0, scratch_reg);
1798 return 1;
1800 /* Handle the most common case: storing into a register. */
1801 else if (register_operand (operand0, mode))
1803 /* Legitimize TLS symbol references. This happens for references
1804 that aren't legitimate constants. */
1805 if (PA_SYMBOL_REF_TLS_P (operand1))
1806 operand1 = legitimize_tls_address (operand1);
1808 if (register_operand (operand1, mode)
1809 || (GET_CODE (operand1) == CONST_INT
1810 && pa_cint_ok_for_move (INTVAL (operand1)))
1811 || (operand1 == CONST0_RTX (mode))
1812 || (GET_CODE (operand1) == HIGH
1813 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1814 /* Only `general_operands' can come here, so MEM is ok. */
1815 || GET_CODE (operand1) == MEM)
1817 /* Various sets are created during RTL generation which don't
1818 have the REG_POINTER flag correctly set. After the CSE pass,
1819 instruction recognition can fail if we don't consistently
1820 set this flag when performing register copies. This should
1821 also improve the opportunities for creating insns that use
1822 unscaled indexing. */
1823 if (REG_P (operand0) && REG_P (operand1))
1825 if (REG_POINTER (operand1)
1826 && !REG_POINTER (operand0)
1827 && !HARD_REGISTER_P (operand0))
1828 copy_reg_pointer (operand0, operand1);
1831 /* When MEMs are broken out, the REG_POINTER flag doesn't
1832 get set. In some cases, we can set the REG_POINTER flag
1833 from the declaration for the MEM. */
1834 if (REG_P (operand0)
1835 && GET_CODE (operand1) == MEM
1836 && !REG_POINTER (operand0))
1838 tree decl = MEM_EXPR (operand1);
1840 /* Set the register pointer flag and register alignment
1841 if the declaration for this memory reference is a
1842 pointer type. */
1843 if (decl)
1845 tree type;
1847 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1848 tree operand 1. */
1849 if (TREE_CODE (decl) == COMPONENT_REF)
1850 decl = TREE_OPERAND (decl, 1);
1852 type = TREE_TYPE (decl);
1853 type = strip_array_types (type);
1855 if (POINTER_TYPE_P (type))
1857 int align;
1859 type = TREE_TYPE (type);
1860 /* Using TYPE_ALIGN_OK is rather conservative as
1861 only the Ada frontend actually sets it. */
1862 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1863 : BITS_PER_UNIT);
1864 mark_reg_pointer (operand0, align);
1869 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1870 return 1;
1873 else if (GET_CODE (operand0) == MEM)
1875 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1876 && !(reload_in_progress || reload_completed))
1878 rtx temp = gen_reg_rtx (DFmode);
1880 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1881 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1882 return 1;
1884 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1886 /* Run this case quickly. */
1887 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1888 return 1;
1890 if (! (reload_in_progress || reload_completed))
1892 operands[0] = validize_mem (operand0);
1893 operands[1] = operand1 = force_reg (mode, operand1);
1897 /* Simplify the source if we need to.
1898 Note we do have to handle function labels here, even though we do
1899 not consider them legitimate constants. Loop optimizations can
1900 call the emit_move_xxx with one as a source. */
1901 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1902 || (GET_CODE (operand1) == HIGH
1903 && symbolic_operand (XEXP (operand1, 0), mode))
1904 || function_label_operand (operand1, VOIDmode)
1905 || tls_referenced_p (operand1))
1907 int ishighonly = 0;
1909 if (GET_CODE (operand1) == HIGH)
1911 ishighonly = 1;
1912 operand1 = XEXP (operand1, 0);
1914 if (symbolic_operand (operand1, mode))
1916 /* Argh. The assembler and linker can't handle arithmetic
1917 involving plabels.
1919 So we force the plabel into memory, load operand0 from
1920 the memory location, then add in the constant part. */
1921 if ((GET_CODE (operand1) == CONST
1922 && GET_CODE (XEXP (operand1, 0)) == PLUS
1923 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1924 VOIDmode))
1925 || function_label_operand (operand1, VOIDmode))
1927 rtx temp, const_part;
1929 /* Figure out what (if any) scratch register to use. */
1930 if (reload_in_progress || reload_completed)
1932 scratch_reg = scratch_reg ? scratch_reg : operand0;
1933 /* SCRATCH_REG will hold an address and maybe the actual
1934 data. We want it in WORD_MODE regardless of what mode it
1935 was originally given to us. */
1936 scratch_reg = force_mode (word_mode, scratch_reg);
1938 else if (flag_pic)
1939 scratch_reg = gen_reg_rtx (Pmode);
1941 if (GET_CODE (operand1) == CONST)
1943 /* Save away the constant part of the expression. */
1944 const_part = XEXP (XEXP (operand1, 0), 1);
1945 gcc_assert (GET_CODE (const_part) == CONST_INT);
1947 /* Force the function label into memory. */
1948 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1950 else
1952 /* No constant part. */
1953 const_part = NULL_RTX;
1955 /* Force the function label into memory. */
1956 temp = force_const_mem (mode, operand1);
1960 /* Get the address of the memory location. PIC-ify it if
1961 necessary. */
1962 temp = XEXP (temp, 0);
1963 if (flag_pic)
1964 temp = legitimize_pic_address (temp, mode, scratch_reg);
1966 /* Put the address of the memory location into our destination
1967 register. */
1968 operands[1] = temp;
1969 pa_emit_move_sequence (operands, mode, scratch_reg);
1971 /* Now load from the memory location into our destination
1972 register. */
1973 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1974 pa_emit_move_sequence (operands, mode, scratch_reg);
1976 /* And add back in the constant part. */
1977 if (const_part != NULL_RTX)
1978 expand_inc (operand0, const_part);
1980 return 1;
1983 if (flag_pic)
1985 rtx temp;
1987 if (reload_in_progress || reload_completed)
1989 temp = scratch_reg ? scratch_reg : operand0;
1990 /* TEMP will hold an address and maybe the actual
1991 data. We want it in WORD_MODE regardless of what mode it
1992 was originally given to us. */
1993 temp = force_mode (word_mode, temp);
1995 else
1996 temp = gen_reg_rtx (Pmode);
1998 /* (const (plus (symbol) (const_int))) must be forced to
1999 memory during/after reload if the const_int will not fit
2000 in 14 bits. */
2001 if (GET_CODE (operand1) == CONST
2002 && GET_CODE (XEXP (operand1, 0)) == PLUS
2003 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2004 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2005 && (reload_completed || reload_in_progress)
2006 && flag_pic)
2008 rtx const_mem = force_const_mem (mode, operand1);
2009 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2010 mode, temp);
2011 operands[1] = replace_equiv_address (const_mem, operands[1]);
2012 pa_emit_move_sequence (operands, mode, temp);
2014 else
2016 operands[1] = legitimize_pic_address (operand1, mode, temp);
2017 if (REG_P (operand0) && REG_P (operands[1]))
2018 copy_reg_pointer (operand0, operands[1]);
2019 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2022 /* On the HPPA, references to data space are supposed to use dp,
2023 register 27, but showing it in the RTL inhibits various cse
2024 and loop optimizations. */
2025 else
2027 rtx temp, set;
2029 if (reload_in_progress || reload_completed)
2031 temp = scratch_reg ? scratch_reg : operand0;
2032 /* TEMP will hold an address and maybe the actual
2033 data. We want it in WORD_MODE regardless of what mode it
2034 was originally given to us. */
2035 temp = force_mode (word_mode, temp);
2037 else
2038 temp = gen_reg_rtx (mode);
2040 /* Loading a SYMBOL_REF into a register makes that register
2041 safe to be used as the base in an indexed address.
2043 Don't mark hard registers though. That loses. */
2044 if (GET_CODE (operand0) == REG
2045 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2046 mark_reg_pointer (operand0, BITS_PER_UNIT);
2047 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2048 mark_reg_pointer (temp, BITS_PER_UNIT);
2050 if (ishighonly)
2051 set = gen_rtx_SET (mode, operand0, temp);
2052 else
2053 set = gen_rtx_SET (VOIDmode,
2054 operand0,
2055 gen_rtx_LO_SUM (mode, temp, operand1));
2057 emit_insn (gen_rtx_SET (VOIDmode,
2058 temp,
2059 gen_rtx_HIGH (mode, operand1)));
2060 emit_insn (set);
2063 return 1;
2065 else if (tls_referenced_p (operand1))
2067 rtx tmp = operand1;
2068 rtx addend = NULL;
2070 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2072 addend = XEXP (XEXP (tmp, 0), 1);
2073 tmp = XEXP (XEXP (tmp, 0), 0);
2076 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2077 tmp = legitimize_tls_address (tmp);
2078 if (addend)
2080 tmp = gen_rtx_PLUS (mode, tmp, addend);
2081 tmp = force_operand (tmp, operands[0]);
2083 operands[1] = tmp;
2085 else if (GET_CODE (operand1) != CONST_INT
2086 || !pa_cint_ok_for_move (INTVAL (operand1)))
2088 rtx insn, temp;
2089 rtx op1 = operand1;
2090 HOST_WIDE_INT value = 0;
2091 HOST_WIDE_INT insv = 0;
2092 int insert = 0;
2094 if (GET_CODE (operand1) == CONST_INT)
2095 value = INTVAL (operand1);
2097 if (TARGET_64BIT
2098 && GET_CODE (operand1) == CONST_INT
2099 && HOST_BITS_PER_WIDE_INT > 32
2100 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2102 HOST_WIDE_INT nval;
2104 /* Extract the low order 32 bits of the value and sign extend.
2105 If the new value is the same as the original value, we can
2106 use the original value as-is. If the new value is
2107 different, we use it and insert the most-significant 32-bits
2108 of the original value into the final result. */
2109 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2110 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2111 if (value != nval)
2113 #if HOST_BITS_PER_WIDE_INT > 32
2114 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2115 #endif
2116 insert = 1;
2117 value = nval;
2118 operand1 = GEN_INT (nval);
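/* A hedged worked example (editorial, not from the original source):
   value = 0x0000000123456789 sign extends its low 32 bits to
   nval = 0x23456789 != value, so INSERT is set, insv = 0x1 (the high
   word) and nval is loaded first; the insv code later in this
   function then deposits insv into the upper 32 bits. For
   value = 0x00000000f0000000 the sign extension goes negative, so
   the same path is taken even though the high word is zero. */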
2122 if (reload_in_progress || reload_completed)
2123 temp = scratch_reg ? scratch_reg : operand0;
2124 else
2125 temp = gen_reg_rtx (mode);
2127 /* We don't directly split DImode constants on 32-bit targets
2128 because PLUS uses an 11-bit immediate and the insn sequence
2129 generated is not as efficient as the one using HIGH/LO_SUM. */
2130 if (GET_CODE (operand1) == CONST_INT
2131 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2132 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2133 && !insert)
2135 /* Directly break constant into high and low parts. This
2136 provides better optimization opportunities because various
2137 passes recognize constants split with PLUS but not LO_SUM.
2138 We use a 14-bit signed low part except when the addition
2139 of 0x4000 to the high part might change the sign of the
2140 high part. */
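/* A hedged worked example (editorial): value = 0x12345678 splits
   into high = 0x12344000 and low = 0x1678, emitted as roughly
   "ldil L'...,%r; ldo 0x1678(%r),%r". For value = 0x3000 the naive
   low field 0x3000 exceeds 0x1fff, so high becomes 0x4000 and
   low = -0x1000, keeping LOW within the signed 14-bit range. */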
2141 HOST_WIDE_INT low = value & 0x3fff;
2142 HOST_WIDE_INT high = value & ~ 0x3fff;
2144 if (low >= 0x2000)
2146 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2147 high += 0x2000;
2148 else
2149 high += 0x4000;
2152 low = value - high;
2154 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2155 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2157 else
2159 emit_insn (gen_rtx_SET (VOIDmode, temp,
2160 gen_rtx_HIGH (mode, operand1)));
2161 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2164 insn = emit_move_insn (operands[0], operands[1]);
2166 /* Now insert the most significant 32 bits of the value
2167 into the register. When we don't have a second register
2168 available, it could take up to nine instructions to load
2169 a 64-bit integer constant. Prior to reload, we force
2170 constants that would take more than three instructions
2171 to load to the constant pool. During and after reload,
2172 we have to handle all possible values. */
2173 if (insert)
2175 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2176 register and the value to be inserted is outside the
2177 range that can be loaded with three depdi instructions. */
2178 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2180 operand1 = GEN_INT (insv);
2182 emit_insn (gen_rtx_SET (VOIDmode, temp,
2183 gen_rtx_HIGH (mode, operand1)));
2184 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2185 if (mode == DImode)
2186 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2187 const0_rtx, temp));
2188 else
2189 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2190 const0_rtx, temp));
2192 else
2194 int len = 5, pos = 27;
2196 /* Insert the bits using the depdi instruction. */
2197 while (pos >= 0)
2199 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2200 HOST_WIDE_INT sign = v5 < 0;
2202 /* Left extend the insertion. */
2203 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2204 while (pos > 0 && (insv & 1) == sign)
2206 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2207 len += 1;
2208 pos -= 1;
2211 if (mode == DImode)
2212 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2213 GEN_INT (pos), GEN_INT (v5)));
2214 else
2215 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2216 GEN_INT (pos), GEN_INT (v5)));
2218 len = pos > 0 && pos < 5 ? pos : 5;
2219 pos -= len;
2224 set_unique_reg_note (insn, REG_EQUAL, op1);
2226 return 1;
2229 /* Now have insn-emit do whatever it normally does. */
2230 return 0;
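/* A hedged usage sketch (editorial; the real expander text lives in
   pa.md, not here): a move expander tries this routine first and
   falls through to the generic move patterns only when it returns 0,
   along the lines of

	if (pa_emit_move_sequence (operands, SImode, 0))
	  DONE;

   The third argument is the scratch register, 0 except for the
   secondary-reload cases handled above. */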
2233 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2234 it will need a link/runtime reloc). */
2236 int
2237 pa_reloc_needed (tree exp)
2239 int reloc = 0;
2241 switch (TREE_CODE (exp))
2243 case ADDR_EXPR:
2244 return 1;
2246 case POINTER_PLUS_EXPR:
2247 case PLUS_EXPR:
2248 case MINUS_EXPR:
2249 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2250 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2251 break;
2253 CASE_CONVERT:
2254 case NON_LVALUE_EXPR:
2255 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2256 break;
2258 case CONSTRUCTOR:
2260 tree value;
2261 unsigned HOST_WIDE_INT ix;
2263 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2264 if (value)
2265 reloc |= pa_reloc_needed (value);
2267 break;
2269 case ERROR_MARK:
2270 break;
2272 default:
2273 break;
2275 return reloc;
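/* A hedged worked example (editorial): a static initializer such as
   `int *p = &x;' is an ADDR_EXPR and yields 1; an initializer that
   adds an offset to &x arrives as a POINTER_PLUS_EXPR whose operands
   are walked recursively, so it is also nonzero; a plain integer
   constant contains no ADDR_EXPR and yields 0. */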
2279 /* Return the best assembler insn template
2280 for moving operands[1] into operands[0] as a fullword. */
2281 const char *
2282 pa_singlemove_string (rtx *operands)
2284 HOST_WIDE_INT intval;
2286 if (GET_CODE (operands[0]) == MEM)
2287 return "stw %r1,%0";
2288 if (GET_CODE (operands[1]) == MEM)
2289 return "ldw %1,%0";
2290 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2292 long i;
2293 REAL_VALUE_TYPE d;
2295 gcc_assert (GET_MODE (operands[1]) == SFmode);
2297 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2298 bit pattern. */
2299 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2300 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2302 operands[1] = GEN_INT (i);
2303 /* Fall through to CONST_INT case. */
2305 if (GET_CODE (operands[1]) == CONST_INT)
2307 intval = INTVAL (operands[1]);
2309 if (VAL_14_BITS_P (intval))
2310 return "ldi %1,%0";
2311 else if ((intval & 0x7ff) == 0)
2312 return "ldil L'%1,%0";
2313 else if (pa_zdepi_cint_p (intval))
2314 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2315 else
2316 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2318 return "copy %1,%0";
2322 /* Compute position (in OP[1]) and width (in OP[2])
2323 useful for copying IMM to a register using the zdepi
2324 instruction. Store the immediate value to insert in OP[0]. */
2325 static void
2326 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2328 int lsb, len;
2330 /* Find the least significant set bit in IMM. */
2331 for (lsb = 0; lsb < 32; lsb++)
2333 if ((imm & 1) != 0)
2334 break;
2335 imm >>= 1;
2338 /* Choose variants based on *sign* of the 5-bit field. */
2339 if ((imm & 0x10) == 0)
2340 len = (lsb <= 28) ? 4 : 32 - lsb;
2341 else
2343 /* Find the width of the bitstring in IMM. */
2344 for (len = 5; len < 32 - lsb; len++)
2346 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2347 break;
2350 /* Sign extend IMM as a 5-bit value. */
2351 imm = (imm & 0xf) - 0x10;
2354 op[0] = imm;
2355 op[1] = 31 - lsb;
2356 op[2] = len;
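/* A hedged worked example (editorial): imm = 0x00f00000 has its
   least significant set bit at lsb = 20; the remaining bits 0xf have
   bit 4 clear, so len = 4 and the result is op[0] = 0xf,
   op[1] = 31 - 20 = 11, op[2] = 4: deposit the 4-bit value 15 ending
   at big-endian bit 11, recreating 0x00f00000. The sketch below is
   editorial and not part of the build; zdepwi_reconstruct is a
   hypothetical helper that inverts the encoding as a cross-check. */
#if 0
static unsigned HOST_WIDE_INT
zdepwi_reconstruct (const unsigned *op)
{
  /* OP[0] is a signed 5-bit value that the hardware sign extends to
     OP[2] bits; the field ends at big-endian bit OP[1], i.e. starts
     at little-endian bit 31 - OP[1].  */
  unsigned HOST_WIDE_INT field
    = op[0] & (((unsigned HOST_WIDE_INT) 1 << op[2]) - 1);
  return (field << (31 - op[1])) & 0xffffffff;
}
#endif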
2359 /* Compute position (in OP[1]) and width (in OP[2])
2360 useful for copying IMM to a register using the depdi,z
2361 instruction. Store the immediate value to insert in OP[0]. */
2363 static void
2364 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2366 int lsb, len, maxlen;
2368 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2370 /* Find the least significant set bit in IMM. */
2371 for (lsb = 0; lsb < maxlen; lsb++)
2373 if ((imm & 1) != 0)
2374 break;
2375 imm >>= 1;
2378 /* Choose variants based on *sign* of the 5-bit field. */
2379 if ((imm & 0x10) == 0)
2380 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2381 else
2383 /* Find the width of the bitstring in IMM. */
2384 for (len = 5; len < maxlen - lsb; len++)
2386 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2387 break;
2390 /* Extend length if host is narrow and IMM is negative. */
2391 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2392 len += 32;
2394 /* Sign extend IMM as a 5-bit value. */
2395 imm = (imm & 0xf) - 0x10;
2398 op[0] = imm;
2399 op[1] = 63 - lsb;
2400 op[2] = len;
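/* A hedged note (editorial): on a host where HOST_WIDE_INT is only
   32 bits, a negative IMM whose bitstring reaches bit maxlen - 1
   really continues through the upper word of the 64-bit register, so
   len is widened by 32 above to let depdi,z replicate the sign bits
   across the full doubleword. */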
2403 /* Output assembler code to perform a doubleword move insn
2404 with operands OPERANDS. */
2406 const char *
2407 pa_output_move_double (rtx *operands)
2409 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2410 rtx latehalf[2];
2411 rtx addreg0 = 0, addreg1 = 0;
2413 /* First classify both operands. */
2415 if (REG_P (operands[0]))
2416 optype0 = REGOP;
2417 else if (offsettable_memref_p (operands[0]))
2418 optype0 = OFFSOP;
2419 else if (GET_CODE (operands[0]) == MEM)
2420 optype0 = MEMOP;
2421 else
2422 optype0 = RNDOP;
2424 if (REG_P (operands[1]))
2425 optype1 = REGOP;
2426 else if (CONSTANT_P (operands[1]))
2427 optype1 = CNSTOP;
2428 else if (offsettable_memref_p (operands[1]))
2429 optype1 = OFFSOP;
2430 else if (GET_CODE (operands[1]) == MEM)
2431 optype1 = MEMOP;
2432 else
2433 optype1 = RNDOP;
2435 /* Check for cases that the operand constraints are not
2436 supposed to allow. */
2437 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2439 /* Handle copies between general and floating registers. */
2441 if (optype0 == REGOP && optype1 == REGOP
2442 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2444 if (FP_REG_P (operands[0]))
2446 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2447 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2448 return "{fldds|fldd} -16(%%sp),%0";
2450 else
2452 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2453 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2454 return "{ldws|ldw} -12(%%sp),%R0";
2458 /* Handle auto decrementing and incrementing loads and stores
2459 specifically, since the structure of the function doesn't work
2460 for them without major modification. Do it better once we teach
2461 this port about the general inc/dec addressing of the PA.
2462 (This was written by tege. Chide him if it doesn't work.) */
2464 if (optype0 == MEMOP)
2466 /* We have to output the address syntax ourselves, since print_operand
2467 doesn't deal with the addresses we want to use. Fix this later. */
2469 rtx addr = XEXP (operands[0], 0);
2470 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2472 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2474 operands[0] = XEXP (addr, 0);
2475 gcc_assert (GET_CODE (operands[1]) == REG
2476 && GET_CODE (operands[0]) == REG);
2478 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2480 /* No overlap between high target register and address
2481 register. (We do this in a non-obvious way to
2482 save a register file writeback) */
2483 if (GET_CODE (addr) == POST_INC)
2484 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2485 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2487 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2489 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2491 operands[0] = XEXP (addr, 0);
2492 gcc_assert (GET_CODE (operands[1]) == REG
2493 && GET_CODE (operands[0]) == REG);
2495 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2496 /* No overlap between high target register and address
2497 register. (We do this in a non-obvious way to save a
2498 register file writeback) */
2499 if (GET_CODE (addr) == PRE_INC)
2500 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2501 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2504 if (optype1 == MEMOP)
2506 /* We have to output the address syntax ourselves, since print_operand
2507 doesn't deal with the addresses we want to use. Fix this later. */
2509 rtx addr = XEXP (operands[1], 0);
2510 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2512 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2514 operands[1] = XEXP (addr, 0);
2515 gcc_assert (GET_CODE (operands[0]) == REG
2516 && GET_CODE (operands[1]) == REG);
2518 if (!reg_overlap_mentioned_p (high_reg, addr))
2520 /* No overlap between high target register and address
2521 register. (We do this in a non-obvious way to
2522 save a register file writeback) */
2523 if (GET_CODE (addr) == POST_INC)
2524 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2525 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2527 else
2529 /* This is an undefined situation. We should load into the
2530 address register *and* update that register. Probably
2531 we don't need to handle this at all. */
2532 if (GET_CODE (addr) == POST_INC)
2533 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2534 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2537 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2539 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2541 operands[1] = XEXP (addr, 0);
2542 gcc_assert (GET_CODE (operands[0]) == REG
2543 && GET_CODE (operands[1]) == REG);
2545 if (!reg_overlap_mentioned_p (high_reg, addr))
2547 /* No overlap between high target register and address
2548 register. (We do this in a non-obvious way to
2549 save a register file writeback) */
2550 if (GET_CODE (addr) == PRE_INC)
2551 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2552 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2554 else
2556 /* This is an undefined situation. We should load into the
2557 address register *and* update that register. Probably
2558 we don't need to handle this at all. */
2559 if (GET_CODE (addr) == PRE_INC)
2560 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2561 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2564 else if (GET_CODE (addr) == PLUS
2565 && GET_CODE (XEXP (addr, 0)) == MULT)
2567 rtx xoperands[4];
2568 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2570 if (!reg_overlap_mentioned_p (high_reg, addr))
2572 xoperands[0] = high_reg;
2573 xoperands[1] = XEXP (addr, 1);
2574 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2575 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2576 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2577 xoperands);
2578 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2580 else
2582 xoperands[0] = high_reg;
2583 xoperands[1] = XEXP (addr, 1);
2584 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2585 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2586 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2587 xoperands);
2588 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2593 /* If an operand is an unoffsettable memory ref, find a register
2594 we can increment temporarily to make it refer to the second word. */
2596 if (optype0 == MEMOP)
2597 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2599 if (optype1 == MEMOP)
2600 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2602 /* Ok, we can do one word at a time.
2603 Normally we do the low-numbered word first.
2605 In either case, set up in LATEHALF the operands to use
2606 for the high-numbered word and in some cases alter the
2607 operands in OPERANDS to be suitable for the low-numbered word. */
2609 if (optype0 == REGOP)
2610 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2611 else if (optype0 == OFFSOP)
2612 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2613 else
2614 latehalf[0] = operands[0];
2616 if (optype1 == REGOP)
2617 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2618 else if (optype1 == OFFSOP)
2619 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2620 else if (optype1 == CNSTOP)
2621 split_double (operands[1], &operands[1], &latehalf[1]);
2622 else
2623 latehalf[1] = operands[1];
2625 /* If the first move would clobber the source of the second one,
2626 do them in the other order.
2628 This can happen in two cases:
2630 mem -> register where the first half of the destination register
2631 is the same register used in the memory's address. Reload
2632 can create such insns.
2634 mem in this case will be either register indirect or register
2635 indirect plus a valid offset.
2637 register -> register move where REGNO(dst) == REGNO(src + 1)
2638 someone (Tim/Tege?) claimed this can happen for parameter loads.
2640 Handle mem -> register case first. */
2641 if (optype0 == REGOP
2642 && (optype1 == MEMOP || optype1 == OFFSOP)
2643 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2644 operands[1], 0))
2646 /* Do the late half first. */
2647 if (addreg1)
2648 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2649 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2651 /* Then clobber. */
2652 if (addreg1)
2653 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2654 return pa_singlemove_string (operands);
2657 /* Now handle register -> register case. */
2658 if (optype0 == REGOP && optype1 == REGOP
2659 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2661 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2662 return pa_singlemove_string (operands);
2665 /* Normal case: do the two words, low-numbered first. */
2667 output_asm_insn (pa_singlemove_string (operands), operands);
2669 /* Make any unoffsettable addresses point at high-numbered word. */
2670 if (addreg0)
2671 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2672 if (addreg1)
2673 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2675 /* Do that word. */
2676 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2678 /* Undo the adds we just did. */
2679 if (addreg0)
2680 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2681 if (addreg1)
2682 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2684 return "";
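/* A hedged worked example (editorial): for a register-to-register
   DImode move with REGNO (dst) == REGNO (src) + 1, say %r5:%r6 from
   %r4:%r5, doing the low word first would clobber %r5 before it is
   read; the code above therefore emits the late half ("copy %r5,%r6")
   and only then the early half ("copy %r4,%r5"). The overlapping
   mem-to-register case is handled the same way, with ldo 4/-4
   adjustments around the late-half access when the address is
   unoffsettable. */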
2687 const char *
2688 pa_output_fp_move_double (rtx *operands)
2690 if (FP_REG_P (operands[0]))
2692 if (FP_REG_P (operands[1])
2693 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2694 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2695 else
2696 output_asm_insn ("fldd%F1 %1,%0", operands);
2698 else if (FP_REG_P (operands[1]))
2700 output_asm_insn ("fstd%F0 %1,%0", operands);
2702 else
2704 rtx xoperands[2];
2706 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2708 /* This is a pain. You have to be prepared to deal with an
2709 arbitrary address here including pre/post increment/decrement.
2711 So avoid this in the MD. */
2712 gcc_assert (GET_CODE (operands[0]) == REG);
2714 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2715 xoperands[0] = operands[0];
2716 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2718 return "";
2721 /* Return a REG that occurs in ADDR with coefficient 1.
2722 ADDR can be effectively incremented by incrementing REG. */
2724 static rtx
2725 find_addr_reg (rtx addr)
2727 while (GET_CODE (addr) == PLUS)
2729 if (GET_CODE (XEXP (addr, 0)) == REG)
2730 addr = XEXP (addr, 0);
2731 else if (GET_CODE (XEXP (addr, 1)) == REG)
2732 addr = XEXP (addr, 1);
2733 else if (CONSTANT_P (XEXP (addr, 0)))
2734 addr = XEXP (addr, 1);
2735 else if (CONSTANT_P (XEXP (addr, 1)))
2736 addr = XEXP (addr, 0);
2737 else
2738 gcc_unreachable ();
2740 gcc_assert (GET_CODE (addr) == REG);
2741 return addr;
2744 /* Emit code to perform a block move.
2746 OPERANDS[0] is the destination pointer as a REG, clobbered.
2747 OPERANDS[1] is the source pointer as a REG, clobbered.
2748 OPERANDS[2] is a register for temporary storage.
2749 OPERANDS[3] is a register for temporary storage.
2750 OPERANDS[4] is the size as a CONST_INT.
2751 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2752 OPERANDS[6] is another temporary register. */
2754 const char *
2755 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2757 int align = INTVAL (operands[5]);
2758 unsigned long n_bytes = INTVAL (operands[4]);
2760 /* We can't move more than a word at a time because the PA
2761 has no integer move insns wider than a word. (Could use fp mem ops?) */
2762 if (align > (TARGET_64BIT ? 8 : 4))
2763 align = (TARGET_64BIT ? 8 : 4);
2765 /* Note that we know each loop below will execute at least twice
2766 (else we would have open-coded the copy). */
2767 switch (align)
2769 case 8:
2770 /* Pre-adjust the loop counter. */
2771 operands[4] = GEN_INT (n_bytes - 16);
2772 output_asm_insn ("ldi %4,%2", operands);
2774 /* Copying loop. */
2775 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2776 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2777 output_asm_insn ("std,ma %3,8(%0)", operands);
2778 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2779 output_asm_insn ("std,ma %6,8(%0)", operands);
2781 /* Handle the residual. There could be up to 7 bytes of
2782 residual to copy! */
2783 if (n_bytes % 16 != 0)
2785 operands[4] = GEN_INT (n_bytes % 8);
2786 if (n_bytes % 16 >= 8)
2787 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2788 if (n_bytes % 8 != 0)
2789 output_asm_insn ("ldd 0(%1),%6", operands);
2790 if (n_bytes % 16 >= 8)
2791 output_asm_insn ("std,ma %3,8(%0)", operands);
2792 if (n_bytes % 8 != 0)
2793 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2795 return "";
2797 case 4:
2798 /* Pre-adjust the loop counter. */
2799 operands[4] = GEN_INT (n_bytes - 8);
2800 output_asm_insn ("ldi %4,%2", operands);
2802 /* Copying loop. */
2803 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2804 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2805 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2806 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2807 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2809 /* Handle the residual. There could be up to 7 bytes of
2810 residual to copy! */
2811 if (n_bytes % 8 != 0)
2813 operands[4] = GEN_INT (n_bytes % 4);
2814 if (n_bytes % 8 >= 4)
2815 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2816 if (n_bytes % 4 != 0)
2817 output_asm_insn ("ldw 0(%1),%6", operands);
2818 if (n_bytes % 8 >= 4)
2819 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2820 if (n_bytes % 4 != 0)
2821 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2823 return "";
2825 case 2:
2826 /* Pre-adjust the loop counter. */
2827 operands[4] = GEN_INT (n_bytes - 4);
2828 output_asm_insn ("ldi %4,%2", operands);
2830 /* Copying loop. */
2831 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2832 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2833 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2834 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2835 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2837 /* Handle the residual. */
2838 if (n_bytes % 4 != 0)
2840 if (n_bytes % 4 >= 2)
2841 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2842 if (n_bytes % 2 != 0)
2843 output_asm_insn ("ldb 0(%1),%6", operands);
2844 if (n_bytes % 4 >= 2)
2845 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2846 if (n_bytes % 2 != 0)
2847 output_asm_insn ("stb %6,0(%0)", operands);
2849 return "";
2851 case 1:
2852 /* Pre-adjust the loop counter. */
2853 operands[4] = GEN_INT (n_bytes - 2);
2854 output_asm_insn ("ldi %4,%2", operands);
2856 /* Copying loop. */
2857 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2858 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2859 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2860 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2861 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2863 /* Handle the residual. */
2864 if (n_bytes % 2 != 0)
2866 output_asm_insn ("ldb 0(%1),%3", operands);
2867 output_asm_insn ("stb %3,0(%0)", operands);
2869 return "";
2871 default:
2872 gcc_unreachable ();
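/* A hedged note (editorial): the "addib,>= -16,%2,.-12" branches
   above are PC-relative over 4-byte insns, so ".-12" from the addib
   in the align == 8 loop lands back on the first ldd while the
   trailing std executes in the branch's delay slot; each trip thus
   moves 16 bytes. Pre-adjusting the counter to n_bytes - 16 makes
   the loop exit once the count goes negative, leaving any residual
   to the straight-line code that follows. */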
2876 /* Count the number of insns necessary to handle this block move.
2878 Basic structure is the same as emit_block_move, except that we
2879 count insns rather than emit them. */
2881 static int
2882 compute_movmem_length (rtx insn)
2884 rtx pat = PATTERN (insn);
2885 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2886 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2887 unsigned int n_insns = 0;
2889 /* We can't move more than a word at a time because the PA
2890 has no integer move insns wider than a word. (Could use fp mem ops?) */
2891 if (align > (TARGET_64BIT ? 8 : 4))
2892 align = (TARGET_64BIT ? 8 : 4);
2894 /* The basic copying loop. */
2895 n_insns = 6;
2897 /* Residuals. */
2898 if (n_bytes % (2 * align) != 0)
2900 if ((n_bytes % (2 * align)) >= align)
2901 n_insns += 2;
2903 if ((n_bytes % align) != 0)
2904 n_insns += 2;
2907 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2908 return n_insns * 4;
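/* A hedged worked example (editorial): with align = 4 and
   n_bytes = 10, the loop costs 6 insns; 10 % 8 = 2 is nonzero and
   below 4, but 10 % 4 = 2 adds the trailing sub-word pair, giving
   8 insns * 4 = 32 bytes. The sketch below is editorial and not
   part of the build; example_movmem_length is a hypothetical helper
   restating the computation standalone. */
#if 0
static int
example_movmem_length (int align, unsigned long n_bytes)
{
  int n_insns = 6;			/* the basic copying loop */
  if (n_bytes % (2 * align) != 0)
    {
      if (n_bytes % (2 * align) >= (unsigned long) align)
	n_insns += 2;			/* one more aligned load/store pair */
      if (n_bytes % align != 0)
	n_insns += 2;			/* sub-word tail, e.g. ldw + stby */
    }
  return n_insns * 4;			/* each PA insn is 4 bytes; (4, 10) => 32 */
}
#endif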
2911 /* Emit code to perform a block clear.
2913 OPERANDS[0] is the destination pointer as a REG, clobbered.
2914 OPERANDS[1] is a register for temporary storage.
2915 OPERANDS[2] is the size as a CONST_INT.
2916 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2918 const char *
2919 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2921 int align = INTVAL (operands[3]);
2922 unsigned long n_bytes = INTVAL (operands[2]);
2924 /* We can't clear more than a word at a time because the PA
2925 has no integer move insns wider than a word. */
2926 if (align > (TARGET_64BIT ? 8 : 4))
2927 align = (TARGET_64BIT ? 8 : 4);
2929 /* Note that we know each loop below will execute at least twice
2930 (else we would have open-coded the clear). */
2931 switch (align)
2933 case 8:
2934 /* Pre-adjust the loop counter. */
2935 operands[2] = GEN_INT (n_bytes - 16);
2936 output_asm_insn ("ldi %2,%1", operands);
2938 /* Loop. */
2939 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2940 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2941 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2943 /* Handle the residual. There could be up to 7 bytes of
2944 residual to clear! */
2945 if (n_bytes % 16 != 0)
2947 operands[2] = GEN_INT (n_bytes % 8);
2948 if (n_bytes % 16 >= 8)
2949 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2950 if (n_bytes % 8 != 0)
2951 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2953 return "";
2955 case 4:
2956 /* Pre-adjust the loop counter. */
2957 operands[2] = GEN_INT (n_bytes - 8);
2958 output_asm_insn ("ldi %2,%1", operands);
2960 /* Loop. */
2961 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2962 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2963 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2965 /* Handle the residual. There could be up to 7 bytes of
2966 residual to clear! */
2967 if (n_bytes % 8 != 0)
2969 operands[2] = GEN_INT (n_bytes % 4);
2970 if (n_bytes % 8 >= 4)
2971 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2972 if (n_bytes % 4 != 0)
2973 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2975 return "";
2977 case 2:
2978 /* Pre-adjust the loop counter. */
2979 operands[2] = GEN_INT (n_bytes - 4);
2980 output_asm_insn ("ldi %2,%1", operands);
2982 /* Loop. */
2983 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2984 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2985 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2987 /* Handle the residual. */
2988 if (n_bytes % 4 != 0)
2990 if (n_bytes % 4 >= 2)
2991 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2992 if (n_bytes % 2 != 0)
2993 output_asm_insn ("stb %%r0,0(%0)", operands);
2995 return "";
2997 case 1:
2998 /* Pre-adjust the loop counter. */
2999 operands[2] = GEN_INT (n_bytes - 2);
3000 output_asm_insn ("ldi %2,%1", operands);
3002 /* Loop. */
3003 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3004 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3005 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3007 /* Handle the residual. */
3008 if (n_bytes % 2 != 0)
3009 output_asm_insn ("stb %%r0,0(%0)", operands);
3011 return "";
3013 default:
3014 gcc_unreachable ();
3018 /* Count the number of insns necessary to handle this block clear.
3020 Basic structure is the same as emit_block_move, except that we
3021 count insns rather than emit them. */
3023 static int
3024 compute_clrmem_length (rtx insn)
3026 rtx pat = PATTERN (insn);
3027 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3028 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3029 unsigned int n_insns = 0;
3031 /* We can't clear more than a word at a time because the PA
3032 has no integer move insns wider than a word. */
3033 if (align > (TARGET_64BIT ? 8 : 4))
3034 align = (TARGET_64BIT ? 8 : 4);
3036 /* The basic loop. */
3037 n_insns = 4;
3039 /* Residuals. */
3040 if (n_bytes % (2 * align) != 0)
3042 if ((n_bytes % (2 * align)) >= align)
3043 n_insns++;
3045 if ((n_bytes % align) != 0)
3046 n_insns++;
3049 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3050 return n_insns * 4;
3054 const char *
3055 pa_output_and (rtx *operands)
3057 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3059 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3060 int ls0, ls1, ms0, p, len;
3062 for (ls0 = 0; ls0 < 32; ls0++)
3063 if ((mask & (1 << ls0)) == 0)
3064 break;
3066 for (ls1 = ls0; ls1 < 32; ls1++)
3067 if ((mask & (1 << ls1)) != 0)
3068 break;
3070 for (ms0 = ls1; ms0 < 32; ms0++)
3071 if ((mask & (1 << ms0)) == 0)
3072 break;
3074 gcc_assert (ms0 == 32);
3076 if (ls1 == 32)
3078 len = ls0;
3080 gcc_assert (len);
3082 operands[2] = GEN_INT (len);
3083 return "{extru|extrw,u} %1,31,%2,%0";
3085 else
3087 /* We could use this `depi' for the case above as well, but `depi'
3088 requires one more register file access than an `extru'. */
3090 p = 31 - ls0;
3091 len = ls1 - ls0;
3093 operands[2] = GEN_INT (p);
3094 operands[3] = GEN_INT (len);
3095 return "{depi|depwi} 0,%2,%3,%0";
3098 else
3099 return "and %1,%2,%0";
3102 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3103 storing the result in operands[0]. */
3104 const char *
3105 pa_output_64bit_and (rtx *operands)
3107 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3109 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3110 int ls0, ls1, ms0, p, len;
3112 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3113 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3114 break;
3116 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3117 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3118 break;
3120 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3121 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3122 break;
3124 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3126 if (ls1 == HOST_BITS_PER_WIDE_INT)
3128 len = ls0;
3130 gcc_assert (len);
3132 operands[2] = GEN_INT (len);
3133 return "extrd,u %1,63,%2,%0";
3135 else
3137 /* We could use this `depdi' for the case above as well, but `depdi'
3138 requires one more register file access than an `extrd,u'. */
3140 p = 63 - ls0;
3141 len = ls1 - ls0;
3143 operands[2] = GEN_INT (p);
3144 operands[3] = GEN_INT (len);
3145 return "depdi 0,%2,%3,%0";
3148 else
3149 return "and %1,%2,%0";
3152 const char *
3153 pa_output_ior (rtx *operands)
3155 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3156 int bs0, bs1, p, len;
3158 if (INTVAL (operands[2]) == 0)
3159 return "copy %1,%0";
3161 for (bs0 = 0; bs0 < 32; bs0++)
3162 if ((mask & (1 << bs0)) != 0)
3163 break;
3165 for (bs1 = bs0; bs1 < 32; bs1++)
3166 if ((mask & (1 << bs1)) == 0)
3167 break;
3169 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3171 p = 31 - bs0;
3172 len = bs1 - bs0;
3174 operands[2] = GEN_INT (p);
3175 operands[3] = GEN_INT (len);
3176 return "{depi|depwi} -1,%2,%3,%0";
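/* A hedged worked example (editorial): mask = 0x70 has bs0 = 4 and
   bs1 = 7, so p = 27 and len = 3 and the template instantiates as
   "depi -1,27,3,%0", setting bits 4..6 of the destination; a mask
   whose set bits are not contiguous would trip the assert above. */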
3179 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3180 storing the result in operands[0]. */
3181 const char *
3182 pa_output_64bit_ior (rtx *operands)
3184 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3185 int bs0, bs1, p, len;
3187 if (INTVAL (operands[2]) == 0)
3188 return "copy %1,%0";
3190 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3191 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3192 break;
3194 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3195 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3196 break;
3198 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3199 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3201 p = 63 - bs0;
3202 len = bs1 - bs0;
3204 operands[2] = GEN_INT (p);
3205 operands[3] = GEN_INT (len);
3206 return "depdi -1,%2,%3,%0";
3209 /* Target hook for assembling integer objects. This code handles
3210 aligned SI and DI integers specially since function references
3211 must be preceded by P%. */
3213 static bool
3214 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3216 if (size == UNITS_PER_WORD
3217 && aligned_p
3218 && function_label_operand (x, VOIDmode))
3220 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3221 output_addr_const (asm_out_file, x);
3222 fputc ('\n', asm_out_file);
3223 return true;
3225 return default_assemble_integer (x, size, aligned_p);
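/* A hedged example (editorial): for an aligned, word-sized reference
   to a function `foo', this prints "\t.word\tP%foo" (".dword" when
   size == 8); under the HP assembler conventions the P% prefix asks
   the linker to materialize a function pointer (plabel) rather than
   the raw code address. Everything else is left to
   default_assemble_integer. */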
3228 /* Output an ascii string. */
3229 void
3230 pa_output_ascii (FILE *file, const char *p, int size)
3232 int i;
3233 int chars_output;
3234 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3236 /* The HP assembler can only take strings of 256 characters at one
3237 time. This is a limitation on input line length, *not* the
3238 length of the string. Sigh. Even worse, it seems that the
3239 restriction is in number of input characters (see \xnn &
3240 \whatever). So we have to do this very carefully. */
3242 fputs ("\t.STRING \"", file);
3244 chars_output = 0;
3245 for (i = 0; i < size; i += 4)
3247 int co = 0;
3248 int io = 0;
3249 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3251 register unsigned int c = (unsigned char) p[i + io];
3253 if (c == '\"' || c == '\\')
3254 partial_output[co++] = '\\';
3255 if (c >= ' ' && c < 0177)
3256 partial_output[co++] = c;
3257 else
3259 unsigned int hexd;
3260 partial_output[co++] = '\\';
3261 partial_output[co++] = 'x';
3262 hexd = c / 16 - 0 + '0';
3263 if (hexd > '9')
3264 hexd -= '9' - 'a' + 1;
3265 partial_output[co++] = hexd;
3266 hexd = c % 16 - 0 + '0';
3267 if (hexd > '9')
3268 hexd -= '9' - 'a' + 1;
3269 partial_output[co++] = hexd;
3272 if (chars_output + co > 243)
3274 fputs ("\"\n\t.STRING \"", file);
3275 chars_output = 0;
3277 fwrite (partial_output, 1, (size_t) co, file);
3278 chars_output += co;
3279 co = 0;
3281 fputs ("\"\n", file);
3284 /* Try to rewrite floating point comparisons & branches to avoid
3285 useless add,tr insns.
3287 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3288 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3289 first attempt to remove useless add,tr insns. It is zero
3290 for the second pass as reorg sometimes leaves bogus REG_DEAD
3291 notes lying around.
3293 When CHECK_NOTES is zero we can only eliminate add,tr insns
3294 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3295 instructions. */
3296 static void
3297 remove_useless_addtr_insns (int check_notes)
3299 rtx insn;
3300 static int pass = 0;
3302 /* This is fairly cheap, so always run it when optimizing. */
3303 if (optimize > 0)
3305 int fcmp_count = 0;
3306 int fbranch_count = 0;
3308 /* Walk all the insns in this function looking for fcmp & fbranch
3309 instructions. Keep track of how many of each we find. */
3310 for (insn = get_insns (); insn; insn = next_insn (insn))
3312 rtx tmp;
3314 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3315 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3316 continue;
3318 tmp = PATTERN (insn);
3320 /* It must be a set. */
3321 if (GET_CODE (tmp) != SET)
3322 continue;
3324 /* If the destination is CCFP, then we've found an fcmp insn. */
3325 tmp = SET_DEST (tmp);
3326 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3328 fcmp_count++;
3329 continue;
3332 tmp = PATTERN (insn);
3333 /* If this is an fbranch instruction, bump the fbranch counter. */
3334 if (GET_CODE (tmp) == SET
3335 && SET_DEST (tmp) == pc_rtx
3336 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3337 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3338 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3339 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3341 fbranch_count++;
3342 continue;
3347 /* Find all floating point compare + branch insns. If possible,
3348 reverse the comparison & the branch to avoid add,tr insns. */
3349 for (insn = get_insns (); insn; insn = next_insn (insn))
3351 rtx tmp, next;
3353 /* Ignore anything that isn't an INSN. */
3354 if (! NONJUMP_INSN_P (insn))
3355 continue;
3357 tmp = PATTERN (insn);
3359 /* It must be a set. */
3360 if (GET_CODE (tmp) != SET)
3361 continue;
3363 /* The destination must be CCFP, which is register zero. */
3364 tmp = SET_DEST (tmp);
3365 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3366 continue;
3368 /* INSN should be a set of CCFP.
3370 See if the result of this insn is used in a reversed FP
3371 conditional branch. If so, reverse our condition and
3372 the branch. Doing so avoids useless add,tr insns. */
3373 next = next_insn (insn);
3374 while (next)
3376 /* Jumps, calls and labels stop our search. */
3377 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3378 break;
3380 /* As does another fcmp insn. */
3381 if (NONJUMP_INSN_P (next)
3382 && GET_CODE (PATTERN (next)) == SET
3383 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3384 && REGNO (SET_DEST (PATTERN (next))) == 0)
3385 break;
3387 next = next_insn (next);
3390 /* Is NEXT_INSN a branch? */
3391 if (next && JUMP_P (next))
3393 rtx pattern = PATTERN (next);
3395 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3396 and CCFP dies, then reverse our conditional and the branch
3397 to avoid the add,tr. */
3398 if (GET_CODE (pattern) == SET
3399 && SET_DEST (pattern) == pc_rtx
3400 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3401 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3402 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3403 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3404 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3405 && (fcmp_count == fbranch_count
3406 || (check_notes
3407 && find_regno_note (next, REG_DEAD, 0))))
3409 /* Reverse the branch. */
3410 tmp = XEXP (SET_SRC (pattern), 1);
3411 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3412 XEXP (SET_SRC (pattern), 2) = tmp;
3413 INSN_CODE (next) = -1;
3415 /* Reverse our condition. */
3416 tmp = PATTERN (insn);
3417 PUT_CODE (XEXP (tmp, 1),
3418 (reverse_condition_maybe_unordered
3419 (GET_CODE (XEXP (tmp, 1)))));
3425 pass = !pass;
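/* A hedged illustration (editorial): a reversed FP branch is
   implemented with an extra add,tr (an unconditional, nullifying
   skip) around the real branch, costing a cycle. Inverting the fcmp
   condition and swapping the branch's two arms expresses the same
   test as a normal branch with no add,tr;
   reverse_condition_maybe_unordered is used rather than a plain
   reversal so the result stays correct when the operands compare
   unordered (NaNs). */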
3429 /* You may have trouble believing this, but this is the 32 bit HP-PA
3430 stack layout. Wow.
3432 Offset Contents
3434 Variable arguments (optional; any number may be allocated)
3436 SP-(4*(N+9)) arg word N
3438 SP-56 arg word 5
3439 SP-52 arg word 4
3441 Fixed arguments (must be allocated; may remain unused)
3443 SP-48 arg word 3
3444 SP-44 arg word 2
3445 SP-40 arg word 1
3446 SP-36 arg word 0
3448 Frame Marker
3450 SP-32 External Data Pointer (DP)
3451 SP-28 External sr4
3452 SP-24 External/stub RP (RP')
3453 SP-20 Current RP
3454 SP-16 Static Link
3455 SP-12 Clean up
3456 SP-8 Calling Stub RP (RP'')
3457 SP-4 Previous SP
3459 Top of Frame
3461 SP-0 Stack Pointer (points to next available address)
3465 /* This function saves registers as follows. Registers marked with ' are
3466 this function's registers (as opposed to the previous function's).
3467 If a frame_pointer isn't needed, r4 is saved as a general register;
3468 the space for the frame pointer is still allocated, though, to keep
3469 things simple.
3472 Top of Frame
3474 SP (FP') Previous FP
3475 SP + 4 Alignment filler (sigh)
3476 SP + 8 Space for locals reserved here.
3480 SP + n All call saved registers used.
3484 SP + o All call saved fp registers used.
3488 SP + p (SP') points to next available address.
3492 /* Global variables set during prologue generation. */
3493 /* Size of frame. Need to know this to emit return insns from
3494 leaf procedures. */
3495 static HOST_WIDE_INT actual_fsize, local_fsize;
3496 static int save_fregs;
3498 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3499 Handle case where DISP > 8k by using the add_high_const patterns.
3501 Note in the DISP > 8k case, we will leave the high part of the address
3502 in %r1. There is code in pa_expand_{prologue,epilogue} that knows this. */
3504 static void
3505 store_reg (int reg, HOST_WIDE_INT disp, int base)
3507 rtx insn, dest, src, basereg;
3509 src = gen_rtx_REG (word_mode, reg);
3510 basereg = gen_rtx_REG (Pmode, base);
3511 if (VAL_14_BITS_P (disp))
3513 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3514 insn = emit_move_insn (dest, src);
3516 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3518 rtx delta = GEN_INT (disp);
3519 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3521 emit_move_insn (tmpreg, delta);
3522 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3523 if (DO_FRAME_NOTES)
3525 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3526 gen_rtx_SET (VOIDmode, tmpreg,
3527 gen_rtx_PLUS (Pmode, basereg, delta)));
3528 RTX_FRAME_RELATED_P (insn) = 1;
3530 dest = gen_rtx_MEM (word_mode, tmpreg);
3531 insn = emit_move_insn (dest, src);
3533 else
3535 rtx delta = GEN_INT (disp);
3536 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3537 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3539 emit_move_insn (tmpreg, high);
3540 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3541 insn = emit_move_insn (dest, src);
3542 if (DO_FRAME_NOTES)
3543 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3544 gen_rtx_SET (VOIDmode,
3545 gen_rtx_MEM (word_mode,
3546 gen_rtx_PLUS (word_mode,
3547 basereg,
3548 delta)),
3549 src));
3552 if (DO_FRAME_NOTES)
3553 RTX_FRAME_RELATED_P (insn) = 1;
3556 /* Emit RTL to store REG at the memory location specified by BASE and then
3557 add MOD to BASE. MOD must be <= 8k. */
3559 static void
3560 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3562 rtx insn, basereg, srcreg, delta;
3564 gcc_assert (VAL_14_BITS_P (mod));
3566 basereg = gen_rtx_REG (Pmode, base);
3567 srcreg = gen_rtx_REG (word_mode, reg);
3568 delta = GEN_INT (mod);
3570 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3571 if (DO_FRAME_NOTES)
3573 RTX_FRAME_RELATED_P (insn) = 1;
3575 /* RTX_FRAME_RELATED_P must be set on each frame related set
3576 in a parallel with more than one element. */
3577 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3578 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3582 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3583 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3584 whether to add a frame note or not.
3586 In the DISP > 8k case, we leave the high part of the address in %r1.
3587 There is code in pa_expand_{prologue,epilogue} that knows about this. */
3589 static void
3590 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3592 rtx insn;
3594 if (VAL_14_BITS_P (disp))
3596 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3597 plus_constant (Pmode,
3598 gen_rtx_REG (Pmode, base), disp));
3600 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3602 rtx basereg = gen_rtx_REG (Pmode, base);
3603 rtx delta = GEN_INT (disp);
3604 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3606 emit_move_insn (tmpreg, delta);
3607 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3608 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3609 if (DO_FRAME_NOTES)
3610 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3611 gen_rtx_SET (VOIDmode, tmpreg,
3612 gen_rtx_PLUS (Pmode, basereg, delta)));
3614 else
3616 rtx basereg = gen_rtx_REG (Pmode, base);
3617 rtx delta = GEN_INT (disp);
3618 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3620 emit_move_insn (tmpreg,
3621 gen_rtx_PLUS (Pmode, basereg,
3622 gen_rtx_HIGH (Pmode, delta)));
3623 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3624 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3627 if (DO_FRAME_NOTES && note)
3628 RTX_FRAME_RELATED_P (insn) = 1;
3631 HOST_WIDE_INT
3632 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3634 int freg_saved = 0;
3635 int i, j;
3637 /* The code in pa_expand_prologue and pa_expand_epilogue must
3638 be consistent with the rounding and size calculation done here.
3639 Change them at the same time. */
3641 /* We do our own stack alignment. First, round the size of the
3642 stack locals up to a word boundary. */
3643 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3645 /* Space for previous frame pointer + filler. If any frame is
3646 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3647 waste some space here for the sake of HP compatibility. The
3648 first slot is only used when the frame pointer is needed. */
3649 if (size || frame_pointer_needed)
3650 size += STARTING_FRAME_OFFSET;
3652 /* If the current function calls __builtin_eh_return, then we need
3653 to allocate stack space for registers that will hold data for
3654 the exception handler. */
3655 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3657 unsigned int i;
3659 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3660 continue;
3661 size += i * UNITS_PER_WORD;
3664 /* Account for space used by the callee general register saves. */
3665 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3666 if (df_regs_ever_live_p (i))
3667 size += UNITS_PER_WORD;
3669 /* Account for space used by the callee floating point register saves. */
3670 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3671 if (df_regs_ever_live_p (i)
3672 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3674 freg_saved = 1;
3676 /* We always save both halves of the FP register, so always
3677 increment the frame size by 8 bytes. */
3678 size += 8;
3681 /* If any of the floating registers are saved, account for the
3682 alignment needed for the floating point register save block. */
3683 if (freg_saved)
3685 size = (size + 7) & ~7;
3686 if (fregs_live)
3687 *fregs_live = 1;
3690 /* The various ABIs include space for the outgoing parameters in the
3691 size of the current function's stack frame. We don't need to align
3692 for the outgoing arguments as their alignment is set by the final
3693 rounding for the frame as a whole. */
3694 size += crtl->outgoing_args_size;
3696 /* Allocate space for the fixed frame marker. This space must be
3697 allocated for any function that makes calls or allocates
3698 stack space. */
3699 if (!crtl->is_leaf || size)
3700 size += TARGET_64BIT ? 48 : 32;
3702 /* Finally, round to the preferred stack boundary. */
3703 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3704 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3707 /* Generate the assembly code for function entry. FILE is a stdio
3708 stream to output the code to. SIZE is an int: how many units of
3709 temporary storage to allocate.
3711 Refer to the array `regs_ever_live' to determine which registers to
3712 save; `regs_ever_live[I]' is nonzero if register number I is ever
3713 used in the function. This function is responsible for knowing
3714 which registers should not be saved even if used. */
3716 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3717 of memory. If any fpu reg is used in the function, we allocate
3718 such a block here, at the bottom of the frame, just in case it's needed.
3720 If this function is a leaf procedure, then we may choose not
3721 to do a "save" insn. The decision about whether or not
3722 to do this is made in regclass.c. */
3724 static void
3725 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3727 /* The function's label and associated .PROC must never be
3728 separated and must be output *after* any profiling declarations
3729 to avoid changing spaces/subspaces within a procedure. */
3730 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3731 fputs ("\t.PROC\n", file);
3733 /* pa_expand_prologue does the dirty work now. We just need
3734 to output the assembler directives which denote the start
3735 of a function. */
3736 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3737 if (crtl->is_leaf)
3738 fputs (",NO_CALLS", file);
3739 else
3740 fputs (",CALLS", file);
3741 if (rp_saved)
3742 fputs (",SAVE_RP", file);
3744 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3745 at the beginning of the frame and that it is used as the frame
3746 pointer for the frame. We do this because our current frame
3747 layout doesn't conform to that specified in the HP runtime
3748 documentation and we need a way to indicate to programs such as
3749 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3750 isn't used by HP compilers but is supported by the assembler.
3751 However, SAVE_SP is supposed to indicate that the previous stack
3752 pointer has been saved in the frame marker. */
3753 if (frame_pointer_needed)
3754 fputs (",SAVE_SP", file);
3756 /* Pass on information about the number of callee register saves
3757 performed in the prologue.
3759 The compiler is supposed to pass the highest register number
3760 saved, the assembler then has to adjust that number before
3761 entering it into the unwind descriptor (to account for any
3762 caller saved registers with lower register numbers than the
3763 first callee saved register). */
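/* For example, if %r3 through %r7 are saved, gr_saved is 5 and we emit
   ENTRY_GR=7, the highest general register number saved.  */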
3764 if (gr_saved)
3765 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3767 if (fr_saved)
3768 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3770 fputs ("\n\t.ENTRY\n", file);
3772 remove_useless_addtr_insns (0);
3775 void
3776 pa_expand_prologue (void)
3778 int merge_sp_adjust_with_store = 0;
3779 HOST_WIDE_INT size = get_frame_size ();
3780 HOST_WIDE_INT offset;
3781 int i;
3782 rtx insn, tmpreg;
3784 gr_saved = 0;
3785 fr_saved = 0;
3786 save_fregs = 0;
3788 /* Compute total size for frame pointer, filler, locals and rounding to
3789 the next word boundary. Similar code appears in pa_compute_frame_size
3790 and must be changed in tandem with this code. */
3791 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3792 if (local_fsize || frame_pointer_needed)
3793 local_fsize += STARTING_FRAME_OFFSET;
3795 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3796 if (flag_stack_usage_info)
3797 current_function_static_stack_size = actual_fsize;
3799 /* Compute a few things we will use often. */
3800 tmpreg = gen_rtx_REG (word_mode, 1);
3802 /* Save RP first. The calling conventions manual states RP will
3803 always be stored into the caller's frame at sp - 20 or sp - 16
3804 depending on which ABI is in use. */
3805 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3807 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3808 rp_saved = true;
3810 else
3811 rp_saved = false;
3813 /* Allocate the local frame and set up the frame pointer if needed. */
3814 if (actual_fsize != 0)
3816 if (frame_pointer_needed)
3818 /* Copy the old frame pointer temporarily into %r1. Set up the
3819 new stack pointer, then store away the saved old frame pointer
3820 into the stack at sp and at the same time update the stack
3821 pointer by actual_fsize bytes. There are two versions: the
3822 first handles small (<8k) frames, the second handles large
3823 (>=8k) frames. */
3824 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3825 if (DO_FRAME_NOTES)
3826 RTX_FRAME_RELATED_P (insn) = 1;
3828 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3829 if (DO_FRAME_NOTES)
3830 RTX_FRAME_RELATED_P (insn) = 1;
3832 if (VAL_14_BITS_P (actual_fsize))
3833 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3834 else
3836 /* It is incorrect to store the saved frame pointer at *sp,
3837 then increment sp (writes beyond the current stack boundary).
3839 So instead use stwm to store at *sp and post-increment the
3840 stack pointer as an atomic operation. Then increment sp to
3841 finish allocating the new frame. */
3842 HOST_WIDE_INT adjust1 = 8192 - 64;
3843 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3845 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3846 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3847 adjust2, 1);
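/* Worked example: for actual_fsize == 10000, which does not fit in a
   14-bit displacement, adjust1 is 8192 - 64 == 8128 (done with the
   atomic stwm) and adjust2 is 10000 - 8128 == 1872 (added afterwards).  */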
3850 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3851 we need to store the previous stack pointer (frame pointer)
3852 into the frame marker on targets that use the HP unwind
3853 library. This allows the HP unwind library to be used to
3854 unwind GCC frames. However, we are not fully compatible
3855 with the HP library because our frame layout differs from
3856 that specified in the HP runtime specification.
3858 We don't want a frame note on this instruction as the frame
3859 marker moves during dynamic stack allocation.
3861 This instruction also serves as a blockage to prevent
3862 register spills from being scheduled before the stack
3863 pointer is raised. This is necessary as we store
3864 registers using the frame pointer as a base register,
3865 and the frame pointer is set before sp is raised. */
3866 if (TARGET_HPUX_UNWIND_LIBRARY)
3868 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3869 GEN_INT (TARGET_64BIT ? -8 : -4));
3871 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3872 hard_frame_pointer_rtx);
3874 else
3875 emit_insn (gen_blockage ());
3877 /* No frame pointer needed. */
3878 else
3880 /* In some cases we can perform the first callee register save
3881 and allocate the stack frame at the same time. If so, just
3882 make a note of it and defer allocating the frame until saving
3883 the callee registers. */
3884 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3885 merge_sp_adjust_with_store = 1;
3886 /* Cannot optimize. Adjust the stack frame by actual_fsize
3887 bytes. */
3888 else
3889 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3890 actual_fsize, 1);
3894 /* Normal register save.
3896 Do not save the frame pointer in the frame_pointer_needed case. It
3897 was done earlier. */
3898 if (frame_pointer_needed)
3900 offset = local_fsize;
3902 /* Saving the EH return data registers in the frame is the simplest
3903 way to get the frame unwind information emitted. We put them
3904 just before the general registers. */
3905 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3907 unsigned int i, regno;
3909 for (i = 0; ; ++i)
3911 regno = EH_RETURN_DATA_REGNO (i);
3912 if (regno == INVALID_REGNUM)
3913 break;
3915 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3916 offset += UNITS_PER_WORD;
3920 for (i = 18; i >= 4; i--)
3921 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3923 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3924 offset += UNITS_PER_WORD;
3925 gr_saved++;
3927 /* Account for %r3 which is saved in a special place. */
3928 gr_saved++;
3930 /* No frame pointer needed. */
3931 else
3933 offset = local_fsize - actual_fsize;
3935 /* Saving the EH return data registers in the frame is the simplest
3936 way to get the frame unwind information emitted. */
3937 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3939 unsigned int i, regno;
3941 for (i = 0; ; ++i)
3943 regno = EH_RETURN_DATA_REGNO (i);
3944 if (regno == INVALID_REGNUM)
3945 break;
3947 /* If merge_sp_adjust_with_store is nonzero, then we can
3948 optimize the first save. */
3949 if (merge_sp_adjust_with_store)
3951 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3952 merge_sp_adjust_with_store = 0;
3954 else
3955 store_reg (regno, offset, STACK_POINTER_REGNUM);
3956 offset += UNITS_PER_WORD;
3960 for (i = 18; i >= 3; i--)
3961 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3963 /* If merge_sp_adjust_with_store is nonzero, then we can
3964 optimize the first GR save. */
3965 if (merge_sp_adjust_with_store)
3967 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3968 merge_sp_adjust_with_store = 0;
3970 else
3971 store_reg (i, offset, STACK_POINTER_REGNUM);
3972 offset += UNITS_PER_WORD;
3973 gr_saved++;
3976 /* If we wanted to merge the SP adjustment with a GR save, but we never
3977 did any GR saves, then just emit the adjustment here. */
3978 if (merge_sp_adjust_with_store)
3979 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3980 actual_fsize, 1);
3983 /* The hppa calling conventions say that %r19, the pic offset
3984 register, is saved at sp - 32 (in this function's frame)
3985 when generating PIC code. FIXME: What is the correct thing
3986 to do for functions which make no calls and allocate no
3987 frame? Do we need to allocate a frame, or can we just omit
3988 the save? For now we'll just omit the save.
3990 We don't want a note on this insn as the frame marker can
3991 move if there is a dynamic stack allocation. */
3992 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3994 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3996 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4000 /* Align pointer properly (doubleword boundary). */
4001 offset = (offset + 7) & ~7;
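/* E.g. an offset of 52 rounds up to 56, the next doubleword boundary.  */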
4003 /* Floating point register store. */
4004 if (save_fregs)
4006 rtx base;
4008 /* First get the frame or stack pointer to the start of the FP register
4009 save area. */
4010 if (frame_pointer_needed)
4012 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4013 base = hard_frame_pointer_rtx;
4015 else
4017 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4018 base = stack_pointer_rtx;
4021 /* Now actually save the FP registers. */
4022 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4024 if (df_regs_ever_live_p (i)
4025 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4027 rtx addr, insn, reg;
4028 addr = gen_rtx_MEM (DFmode,
4029 gen_rtx_POST_INC (word_mode, tmpreg));
4030 reg = gen_rtx_REG (DFmode, i);
4031 insn = emit_move_insn (addr, reg);
4032 if (DO_FRAME_NOTES)
4034 RTX_FRAME_RELATED_P (insn) = 1;
4035 if (TARGET_64BIT)
4037 rtx mem = gen_rtx_MEM (DFmode,
4038 plus_constant (Pmode, base,
4039 offset));
4040 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4041 gen_rtx_SET (VOIDmode, mem, reg));
4043 else
4045 rtx meml = gen_rtx_MEM (SFmode,
4046 plus_constant (Pmode, base,
4047 offset));
4048 rtx memr = gen_rtx_MEM (SFmode,
4049 plus_constant (Pmode, base,
4050 offset + 4));
4051 rtx regl = gen_rtx_REG (SFmode, i);
4052 rtx regr = gen_rtx_REG (SFmode, i + 1);
4053 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4054 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4055 rtvec vec;
4057 RTX_FRAME_RELATED_P (setl) = 1;
4058 RTX_FRAME_RELATED_P (setr) = 1;
4059 vec = gen_rtvec (2, setl, setr);
4060 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4061 gen_rtx_SEQUENCE (VOIDmode, vec));
4064 offset += GET_MODE_SIZE (DFmode);
4065 fr_saved++;
4071 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4072 Handle case where DISP > 8k by using the add_high_const patterns. */
4074 static void
4075 load_reg (int reg, HOST_WIDE_INT disp, int base)
4077 rtx dest = gen_rtx_REG (word_mode, reg);
4078 rtx basereg = gen_rtx_REG (Pmode, base);
4079 rtx src;
4081 if (VAL_14_BITS_P (disp))
4082 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4083 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4085 rtx delta = GEN_INT (disp);
4086 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4088 emit_move_insn (tmpreg, delta);
4089 if (TARGET_DISABLE_INDEXING)
4091 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4092 src = gen_rtx_MEM (word_mode, tmpreg);
4094 else
4095 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4097 else
4099 rtx delta = GEN_INT (disp);
4100 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4101 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4103 emit_move_insn (tmpreg, high);
4104 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4107 emit_move_insn (dest, src);
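/* Illustrative cases: disp == 100 fits in 14 bits and yields a plain
   base + 100 address, while disp == 40000 does not, so on 32-bit targets
   the HIGH/LO_SUM pair above materializes it through %r1 (typically the
   addil/ldw idiom).  */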
4110 /* Update the total code bytes output to the text section. */
4112 static void
4113 update_total_code_bytes (unsigned int nbytes)
4115 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4116 && !IN_NAMED_SECTION_P (cfun->decl))
4118 unsigned int old_total = total_code_bytes;
4120 total_code_bytes += nbytes;
4122 /* Be prepared to handle overflows. */
4123 if (old_total > total_code_bytes)
4124 total_code_bytes = UINT_MAX;
4128 /* This function generates the assembly code for function exit.
4129 Args are as for output_function_prologue ().
4131 The function epilogue should not depend on the current stack
4132 pointer! It should use the frame pointer only. This is mandatory
4133 because of alloca; we also take advantage of it to omit stack
4134 adjustments before returning. */
4136 static void
4137 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4139 rtx insn = get_last_insn ();
4141 last_address = 0;
4143 /* pa_expand_epilogue does the dirty work now. We just need
4144 to output the assembler directives which denote the end
4145 of a function.
4147 To make debuggers happy, emit a nop if the epilogue was completely
4148 eliminated due to a volatile call as the last insn in the
4149 current function. That way the return address (in %r2) will
4150 always point to a valid instruction in the current function. */
4152 /* Get the last real insn. */
4153 if (NOTE_P (insn))
4154 insn = prev_real_insn (insn);
4156 /* If it is a sequence, then look inside. */
4157 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4158 insn = XVECEXP (PATTERN (insn), 0, 0);
4160 /* If insn is a CALL_INSN, then it must be a call to a volatile
4161 function (otherwise there would be epilogue insns). */
4162 if (insn && CALL_P (insn))
4164 fputs ("\tnop\n", file);
4165 last_address += 4;
4168 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4170 if (TARGET_SOM && TARGET_GAS)
4172 /* We are done with this subspace except possibly for some additional
4173 debug information. Forget that we are in this subspace to ensure
4174 that the next function is output in its own subspace. */
4175 in_section = NULL;
4176 cfun->machine->in_nsubspa = 2;
4179 /* Thunks do their own accounting. */
4180 if (cfun->is_thunk)
4181 return;
4183 if (INSN_ADDRESSES_SET_P ())
4185 insn = get_last_nonnote_insn ();
4186 last_address += INSN_ADDRESSES (INSN_UID (insn));
4187 if (INSN_P (insn))
4188 last_address += insn_default_length (insn);
4189 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4190 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4192 else
4193 last_address = UINT_MAX;
4195 /* Finally, update the total number of code bytes output so far. */
4196 update_total_code_bytes (last_address);
4199 void
4200 pa_expand_epilogue (void)
4202 rtx tmpreg;
4203 HOST_WIDE_INT offset;
4204 HOST_WIDE_INT ret_off = 0;
4205 int i;
4206 int merge_sp_adjust_with_load = 0;
4208 /* We will use this often. */
4209 tmpreg = gen_rtx_REG (word_mode, 1);
4211 /* Try to restore RP early to avoid load/use interlocks when
4212 RP gets used in the return (bv) instruction. This appears to still
4213 be necessary even when we schedule the prologue and epilogue. */
4214 if (rp_saved)
4216 ret_off = TARGET_64BIT ? -16 : -20;
4217 if (frame_pointer_needed)
4219 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4220 ret_off = 0;
4222 else
4224 /* No frame pointer, and stack is smaller than 8k. */
4225 if (VAL_14_BITS_P (ret_off - actual_fsize))
4227 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4228 ret_off = 0;
4233 /* General register restores. */
4234 if (frame_pointer_needed)
4236 offset = local_fsize;
4238 /* If the current function calls __builtin_eh_return, then we need
4239 to restore the saved EH data registers. */
4240 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4242 unsigned int i, regno;
4244 for (i = 0; ; ++i)
4246 regno = EH_RETURN_DATA_REGNO (i);
4247 if (regno == INVALID_REGNUM)
4248 break;
4250 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4251 offset += UNITS_PER_WORD;
4255 for (i = 18; i >= 4; i--)
4256 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4258 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4259 offset += UNITS_PER_WORD;
4262 else
4264 offset = local_fsize - actual_fsize;
4266 /* If the current function calls __builtin_eh_return, then we need
4267 to restore the saved EH data registers. */
4268 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4270 unsigned int i, regno;
4272 for (i = 0; ; ++i)
4274 regno = EH_RETURN_DATA_REGNO (i);
4275 if (regno == INVALID_REGNUM)
4276 break;
4278 /* Only for the first load.
4279 merge_sp_adjust_with_load holds the register load
4280 with which we will merge the sp adjustment. */
4281 if (merge_sp_adjust_with_load == 0
4282 && local_fsize == 0
4283 && VAL_14_BITS_P (-actual_fsize))
4284 merge_sp_adjust_with_load = regno;
4285 else
4286 load_reg (regno, offset, STACK_POINTER_REGNUM);
4287 offset += UNITS_PER_WORD;
4291 for (i = 18; i >= 3; i--)
4293 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4295 /* Only for the first load.
4296 merge_sp_adjust_with_load holds the register load
4297 with which we will merge the sp adjustment. */
4298 if (merge_sp_adjust_with_load == 0
4299 && local_fsize == 0
4300 && VAL_14_BITS_P (-actual_fsize))
4301 merge_sp_adjust_with_load = i;
4302 else
4303 load_reg (i, offset, STACK_POINTER_REGNUM);
4304 offset += UNITS_PER_WORD;
4309 /* Align pointer properly (doubleword boundary). */
4310 offset = (offset + 7) & ~7;
4312 /* FP register restores. */
4313 if (save_fregs)
4315 /* Adjust the register to index off of. */
4316 if (frame_pointer_needed)
4317 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4318 else
4319 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4321 /* Actually do the restores now. */
4322 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4323 if (df_regs_ever_live_p (i)
4324 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4326 rtx src = gen_rtx_MEM (DFmode,
4327 gen_rtx_POST_INC (word_mode, tmpreg));
4328 rtx dest = gen_rtx_REG (DFmode, i);
4329 emit_move_insn (dest, src);
4333 /* Emit a blockage insn here to keep these insns from being moved to
4334 an earlier spot in the epilogue, or into the main instruction stream.
4336 This is necessary as we must not cut the stack back before all the
4337 restores are finished. */
4338 emit_insn (gen_blockage ());
4340 /* Reset stack pointer (and possibly frame pointer). The stack
4341 pointer is initially set to fp + 64 to avoid a race condition. */
4342 if (frame_pointer_needed)
4344 rtx delta = GEN_INT (-64);
4346 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4347 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4348 stack_pointer_rtx, delta));
4350 /* If we were deferring a callee register restore, do it now. */
4351 else if (merge_sp_adjust_with_load)
4353 rtx delta = GEN_INT (-actual_fsize);
4354 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4356 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4358 else if (actual_fsize != 0)
4359 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4360 - actual_fsize, 0);
4362 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4363 frame greater than 8k), do so now. */
4364 if (ret_off != 0)
4365 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4367 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4369 rtx sa = EH_RETURN_STACKADJ_RTX;
4371 emit_insn (gen_blockage ());
4372 emit_insn (TARGET_64BIT
4373 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4374 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4378 bool
4379 pa_can_use_return_insn (void)
4381 if (!reload_completed)
4382 return false;
4384 if (frame_pointer_needed)
4385 return false;
4387 if (df_regs_ever_live_p (2))
4388 return false;
4390 if (crtl->profile)
4391 return false;
4393 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4397 hppa_pic_save_rtx (void)
4399 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4402 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4403 #define NO_DEFERRED_PROFILE_COUNTERS 0
4404 #endif
4407 /* Vector of funcdef numbers. */
4408 static vec<int> funcdef_nos;
4410 /* Output deferred profile counters. */
4411 static void
4412 output_deferred_profile_counters (void)
4414 unsigned int i;
4415 int align, n;
4417 if (funcdef_nos.is_empty ())
4418 return;
4420 switch_to_section (data_section);
4421 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4422 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4424 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4426 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4427 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4430 funcdef_nos.release ();
4433 void
4434 hppa_profile_hook (int label_no)
4436 /* We use SImode for the address of the function in both 32 and
4437 64-bit code to avoid having to provide DImode versions of the
4438 lcla2 and load_offset_label_address insn patterns. */
4439 rtx reg = gen_reg_rtx (SImode);
4440 rtx label_rtx = gen_label_rtx ();
4441 rtx begin_label_rtx, call_insn;
4442 char begin_label_name[16];
4444 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4445 label_no);
4446 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4448 if (TARGET_64BIT)
4449 emit_move_insn (arg_pointer_rtx,
4450 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4451 GEN_INT (64)));
4453 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4455 /* The address of the function is loaded into %r25 with an instruction-
4456 relative sequence that avoids the use of relocations. The sequence
4457 is split so that the load_offset_label_address instruction can
4458 occupy the delay slot of the call to _mcount. */
4459 if (TARGET_PA_20)
4460 emit_insn (gen_lcla2 (reg, label_rtx));
4461 else
4462 emit_insn (gen_lcla1 (reg, label_rtx));
4464 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4465 reg, begin_label_rtx, label_rtx));
4467 #if !NO_DEFERRED_PROFILE_COUNTERS
4469 rtx count_label_rtx, addr, r24;
4470 char count_label_name[16];
4472 funcdef_nos.safe_push (label_no);
4473 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4474 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4476 addr = force_reg (Pmode, count_label_rtx);
4477 r24 = gen_rtx_REG (Pmode, 24);
4478 emit_move_insn (r24, addr);
4480 call_insn =
4481 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4482 gen_rtx_SYMBOL_REF (Pmode,
4483 "_mcount")),
4484 GEN_INT (TARGET_64BIT ? 24 : 12)));
4486 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4488 #else
4490 call_insn =
4491 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4492 gen_rtx_SYMBOL_REF (Pmode,
4493 "_mcount")),
4494 GEN_INT (TARGET_64BIT ? 16 : 8)));
4496 #endif
4498 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4499 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4501 /* Indicate the _mcount call cannot throw, nor will it execute a
4502 non-local goto. */
4503 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4506 /* Fetch the return address for the frame COUNT steps up from
4507 the current frame, after the prologue. FRAMEADDR is the
4508 frame pointer of the COUNT frame.
4510 We want to ignore any export stub remnants here. To handle this,
4511 we examine the code at the return address, and if it is an export
4512 stub, we return a memory rtx for the stub return address stored
4513 at frame-24.
4515 The value returned is used in two different ways:
4517 1. To find a function's caller.
4519 2. To change the return address for a function.
4521 This function handles most instances of case 1; however, it will
4522 fail if there are two levels of stubs to execute on the return
4523 path. The only way I believe that can happen is if the return value
4524 needs a parameter relocation, which never happens for C code.
4526 This function handles most instances of case 2; however, it will
4527 fail if we did not originally have stub code on the return path
4528 but will need stub code on the new return path. This can happen if
4529 the caller & callee are both in the main program, but the new
4530 return location is in a shared library. */
4533 pa_return_addr_rtx (int count, rtx frameaddr)
4535 rtx label;
4536 rtx rp;
4537 rtx saved_rp;
4538 rtx ins;
4540 /* The instruction stream at the return address of a PA1.X export stub is:
4542 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4543 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4544 0x00011820 | stub+16: mtsp r1,sr0
4545 0xe0400002 | stub+20: be,n 0(sr0,rp)
4547 0xe0400002 must be specified as -532676606 so that it won't be
4548 rejected as an invalid immediate operand on 64-bit hosts.
4550 The instruction stream at the return address of a PA2.0 export stub is:
4552 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4553 0xe840d002 | stub+12: bve,n (rp)
4556 HOST_WIDE_INT insns[4];
4557 int i, len;
4559 if (count != 0)
4560 return NULL_RTX;
4562 rp = get_hard_reg_initial_val (Pmode, 2);
4564 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4565 return rp;
4567 /* If there is no export stub then just use the value saved from
4568 the return pointer register. */
4570 saved_rp = gen_reg_rtx (Pmode);
4571 emit_move_insn (saved_rp, rp);
4573 /* Get pointer to the instruction stream. We have to mask out the
4574 privilege level from the two low order bits of the return address
4575 pointer here so that ins will point to the start of the first
4576 instruction that would have been executed if we returned. */
4577 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
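/* For instance, a return address of 0x10001003 (privilege level 3 in
   the two low-order bits) masks to 0x10001000, the address of the first
   instruction to compare against the stub sequences below.  */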
4578 label = gen_label_rtx ();
4580 if (TARGET_PA_20)
4582 insns[0] = 0x4bc23fd1;
4583 insns[1] = -398405630;
4584 len = 2;
4586 else
4588 insns[0] = 0x4bc23fd1;
4589 insns[1] = 0x004010a1;
4590 insns[2] = 0x00011820;
4591 insns[3] = -532676606;
4592 len = 4;
4595 /* Check the instruction stream at the normal return address for the
4596 export stub. If it is an export stub, then our return address is
4597 really in -24[frameaddr]. */
4599 for (i = 0; i < len; i++)
4601 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4602 rtx op1 = GEN_INT (insns[i]);
4603 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4606 /* Here we know that our return address points to an export
4607 stub. We don't want to return the address of the export stub,
4608 but rather the return address of the export stub. That return
4609 address is stored at -24[frameaddr]. */
4611 emit_move_insn (saved_rp,
4612 gen_rtx_MEM (Pmode,
4613 memory_address (Pmode,
4614 plus_constant (Pmode, frameaddr,
4615 -24))));
4617 emit_label (label);
4619 return saved_rp;
4622 void
4623 pa_emit_bcond_fp (rtx operands[])
4625 enum rtx_code code = GET_CODE (operands[0]);
4626 rtx operand0 = operands[1];
4627 rtx operand1 = operands[2];
4628 rtx label = operands[3];
4630 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4631 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4633 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4634 gen_rtx_IF_THEN_ELSE (VOIDmode,
4635 gen_rtx_fmt_ee (NE,
4636 VOIDmode,
4637 gen_rtx_REG (CCFPmode, 0),
4638 const0_rtx),
4639 gen_rtx_LABEL_REF (VOIDmode, label),
4640 pc_rtx)));
4644 /* Adjust the cost of a scheduling dependency. Return the new cost of
4645 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4647 static int
4648 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4650 enum attr_type attr_type;
4652 /* Don't adjust costs for a pa8000 chip. Also, do not adjust any
4653 true dependencies, as they are described with bypasses now. */
4654 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4655 return cost;
4657 if (! recog_memoized (insn))
4658 return 0;
4660 attr_type = get_attr_type (insn);
4662 switch (REG_NOTE_KIND (link))
4664 case REG_DEP_ANTI:
4665 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4666 cycles later. */
4668 if (attr_type == TYPE_FPLOAD)
4670 rtx pat = PATTERN (insn);
4671 rtx dep_pat = PATTERN (dep_insn);
4672 if (GET_CODE (pat) == PARALLEL)
4674 /* This happens for the fldXs,mb patterns. */
4675 pat = XVECEXP (pat, 0, 0);
4677 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4678 /* If this happens, we have to extend this to schedule
4679 optimally. Return 0 for now. */
4680 return 0;
4682 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4684 if (! recog_memoized (dep_insn))
4685 return 0;
4686 switch (get_attr_type (dep_insn))
4688 case TYPE_FPALU:
4689 case TYPE_FPMULSGL:
4690 case TYPE_FPMULDBL:
4691 case TYPE_FPDIVSGL:
4692 case TYPE_FPDIVDBL:
4693 case TYPE_FPSQRTSGL:
4694 case TYPE_FPSQRTDBL:
4695 /* A fpload can't be issued until one cycle before a
4696 preceding arithmetic operation has finished if
4697 the target of the fpload is any of the sources
4698 (or destination) of the arithmetic operation. */
4699 return insn_default_latency (dep_insn) - 1;
4701 default:
4702 return 0;
4706 else if (attr_type == TYPE_FPALU)
4708 rtx pat = PATTERN (insn);
4709 rtx dep_pat = PATTERN (dep_insn);
4710 if (GET_CODE (pat) == PARALLEL)
4712 /* This happens for the fldXs,mb patterns. */
4713 pat = XVECEXP (pat, 0, 0);
4715 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4716 /* If this happens, we have to extend this to schedule
4717 optimally. Return 0 for now. */
4718 return 0;
4720 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4722 if (! recog_memoized (dep_insn))
4723 return 0;
4724 switch (get_attr_type (dep_insn))
4726 case TYPE_FPDIVSGL:
4727 case TYPE_FPDIVDBL:
4728 case TYPE_FPSQRTSGL:
4729 case TYPE_FPSQRTDBL:
4730 /* An ALU flop can't be issued until two cycles before a
4731 preceding divide or sqrt operation has finished if
4732 the target of the ALU flop is any of the sources
4733 (or destination) of the divide or sqrt operation. */
4734 return insn_default_latency (dep_insn) - 2;
4736 default:
4737 return 0;
4742 /* For other anti dependencies, the cost is 0. */
4743 return 0;
4745 case REG_DEP_OUTPUT:
4746 /* Output dependency; DEP_INSN writes a register that INSN writes some
4747 cycles later. */
4748 if (attr_type == TYPE_FPLOAD)
4750 rtx pat = PATTERN (insn);
4751 rtx dep_pat = PATTERN (dep_insn);
4752 if (GET_CODE (pat) == PARALLEL)
4754 /* This happens for the fldXs,mb patterns. */
4755 pat = XVECEXP (pat, 0, 0);
4757 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4758 /* If this happens, we have to extend this to schedule
4759 optimally. Return 0 for now. */
4760 return 0;
4762 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4764 if (! recog_memoized (dep_insn))
4765 return 0;
4766 switch (get_attr_type (dep_insn))
4768 case TYPE_FPALU:
4769 case TYPE_FPMULSGL:
4770 case TYPE_FPMULDBL:
4771 case TYPE_FPDIVSGL:
4772 case TYPE_FPDIVDBL:
4773 case TYPE_FPSQRTSGL:
4774 case TYPE_FPSQRTDBL:
4775 /* A fpload can't be issued until one cycle before a
4776 preceding arithmetic operation has finished if
4777 the target of the fpload is the destination of the
4778 arithmetic operation.
4780 Exception: For PA7100LC, PA7200 and PA7300, the cost
4781 is 3 cycles, unless they bundle together. We also
4782 pay the penalty if the second insn is a fpload. */
4783 return insn_default_latency (dep_insn) - 1;
4785 default:
4786 return 0;
4790 else if (attr_type == TYPE_FPALU)
4792 rtx pat = PATTERN (insn);
4793 rtx dep_pat = PATTERN (dep_insn);
4794 if (GET_CODE (pat) == PARALLEL)
4796 /* This happens for the fldXs,mb patterns. */
4797 pat = XVECEXP (pat, 0, 0);
4799 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4800 /* If this happens, we have to extend this to schedule
4801 optimally. Return 0 for now. */
4802 return 0;
4804 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4806 if (! recog_memoized (dep_insn))
4807 return 0;
4808 switch (get_attr_type (dep_insn))
4810 case TYPE_FPDIVSGL:
4811 case TYPE_FPDIVDBL:
4812 case TYPE_FPSQRTSGL:
4813 case TYPE_FPSQRTDBL:
4814 /* An ALU flop can't be issued until two cycles before a
4815 preceding divide or sqrt operation has finished if
4816 the target of the ALU flop is also the target of
4817 the divide or sqrt operation. */
4818 return insn_default_latency (dep_insn) - 2;
4820 default:
4821 return 0;
4826 /* For other output dependencies, the cost is 0. */
4827 return 0;
4829 default:
4830 gcc_unreachable ();
4834 /* Adjust scheduling priorities. We use this to try and keep addil
4835 and the next use of %r1 close together. */
4836 static int
4837 pa_adjust_priority (rtx insn, int priority)
4839 rtx set = single_set (insn);
4840 rtx src, dest;
4841 if (set)
4843 src = SET_SRC (set);
4844 dest = SET_DEST (set);
4845 if (GET_CODE (src) == LO_SUM
4846 && symbolic_operand (XEXP (src, 1), VOIDmode)
4847 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4848 priority >>= 3;
4850 else if (GET_CODE (src) == MEM
4851 && GET_CODE (XEXP (src, 0)) == LO_SUM
4852 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4853 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4854 priority >>= 1;
4856 else if (GET_CODE (dest) == MEM
4857 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4858 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4859 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4860 priority >>= 3;
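/* E.g. a priority of 64 drops to 8 (>> 3) for the LO_SUM address sets
   and stores, and to 32 (>> 1) for the loads, keeping addil and the
   next use of %r1 close together.  */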
4862 return priority;
4865 /* The 700 can only issue a single insn at a time.
4866 The 7XXX processors can issue two insns at a time.
4867 The 8000 can issue 4 insns at a time. */
4868 static int
4869 pa_issue_rate (void)
4871 switch (pa_cpu)
4873 case PROCESSOR_700: return 1;
4874 case PROCESSOR_7100: return 2;
4875 case PROCESSOR_7100LC: return 2;
4876 case PROCESSOR_7200: return 2;
4877 case PROCESSOR_7300: return 2;
4878 case PROCESSOR_8000: return 4;
4880 default:
4881 gcc_unreachable ();
4887 /* Return the length of INSN plus any adjustment needed, given that
4888 its length has already been computed as LENGTH. Return LENGTH if
4889 no adjustment is necessary.
4891 Also compute the length of an inline block move here as it is too
4892 complicated to express as a length attribute in pa.md. */
4894 pa_adjust_insn_length (rtx insn, int length)
4896 rtx pat = PATTERN (insn);
4898 /* If length is negative or undefined, provide initial length. */
4899 if ((unsigned int) length >= INT_MAX)
4901 if (GET_CODE (pat) == SEQUENCE)
4902 insn = XVECEXP (pat, 0, 0);
4904 switch (get_attr_type (insn))
4906 case TYPE_MILLI:
4907 length = pa_attr_length_millicode_call (insn);
4908 break;
4909 case TYPE_CALL:
4910 length = pa_attr_length_call (insn, 0);
4911 break;
4912 case TYPE_SIBCALL:
4913 length = pa_attr_length_call (insn, 1);
4914 break;
4915 case TYPE_DYNCALL:
4916 length = pa_attr_length_indirect_call (insn);
4917 break;
4918 case TYPE_SH_FUNC_ADRS:
4919 length = pa_attr_length_millicode_call (insn) + 20;
4920 break;
4921 default:
4922 gcc_unreachable ();
4926 /* Block move pattern. */
4927 if (NONJUMP_INSN_P (insn)
4928 && GET_CODE (pat) == PARALLEL
4929 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4930 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4931 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4932 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4933 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4934 length += compute_movmem_length (insn) - 4;
4935 /* Block clear pattern. */
4936 else if (NONJUMP_INSN_P (insn)
4937 && GET_CODE (pat) == PARALLEL
4938 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4939 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4940 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4941 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4942 length += compute_clrmem_length (insn) - 4;
4943 /* Conditional branch with an unfilled delay slot. */
4944 else if (JUMP_P (insn) && ! simplejump_p (insn))
4946 /* Adjust a short backwards conditional with an unfilled delay slot. */
4947 if (GET_CODE (pat) == SET
4948 && length == 4
4949 && JUMP_LABEL (insn) != NULL_RTX
4950 && ! forward_branch_p (insn))
4951 length += 4;
4952 else if (GET_CODE (pat) == PARALLEL
4953 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4954 && length == 4)
4955 length += 4;
4956 /* Adjust dbra insn with short backwards conditional branch with
4957 unfilled delay slot -- only for case where counter is in a
4958 general register. */
4959 else if (GET_CODE (pat) == PARALLEL
4960 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4961 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4962 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4963 && length == 4
4964 && ! forward_branch_p (insn))
4965 length += 4;
4967 return length;
4970 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4972 static bool
4973 pa_print_operand_punct_valid_p (unsigned char code)
4975 if (code == '@'
4976 || code == '#'
4977 || code == '*'
4978 || code == '^')
4979 return true;
4981 return false;
4984 /* Print operand X (an rtx) in assembler syntax to file FILE.
4985 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4986 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4988 void
4989 pa_print_operand (FILE *file, rtx x, int code)
4991 switch (code)
4993 case '#':
4994 /* Output a 'nop' if there's nothing for the delay slot. */
4995 if (dbr_sequence_length () == 0)
4996 fputs ("\n\tnop", file);
4997 return;
4998 case '*':
4999 /* Output a nullification completer if there's nothing for the
5000 delay slot or nullification is requested. */
5001 if (dbr_sequence_length () == 0
5002 || (final_sequence
5003 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5004 fputs (",n", file);
5005 return;
5006 case 'R':
5007 /* Print out the second register name of a register pair.
5008 I.e., R (6) => 7. */
5009 fputs (reg_names[REGNO (x) + 1], file);
5010 return;
5011 case 'r':
5012 /* A register or zero. */
5013 if (x == const0_rtx
5014 || (x == CONST0_RTX (DFmode))
5015 || (x == CONST0_RTX (SFmode)))
5017 fputs ("%r0", file);
5018 return;
5020 else
5021 break;
5022 case 'f':
5023 /* A register or zero (floating point). */
5024 if (x == const0_rtx
5025 || (x == CONST0_RTX (DFmode))
5026 || (x == CONST0_RTX (SFmode)))
5028 fputs ("%fr0", file);
5029 return;
5031 else
5032 break;
5033 case 'A':
5035 rtx xoperands[2];
5037 xoperands[0] = XEXP (XEXP (x, 0), 0);
5038 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5039 pa_output_global_address (file, xoperands[1], 0);
5040 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5041 return;
5044 case 'C': /* Plain (C)ondition */
5045 case 'X':
5046 switch (GET_CODE (x))
5048 case EQ:
5049 fputs ("=", file); break;
5050 case NE:
5051 fputs ("<>", file); break;
5052 case GT:
5053 fputs (">", file); break;
5054 case GE:
5055 fputs (">=", file); break;
5056 case GEU:
5057 fputs (">>=", file); break;
5058 case GTU:
5059 fputs (">>", file); break;
5060 case LT:
5061 fputs ("<", file); break;
5062 case LE:
5063 fputs ("<=", file); break;
5064 case LEU:
5065 fputs ("<<=", file); break;
5066 case LTU:
5067 fputs ("<<", file); break;
5068 default:
5069 gcc_unreachable ();
5071 return;
5072 case 'N': /* Condition, (N)egated */
5073 switch (GET_CODE (x))
5075 case EQ:
5076 fputs ("<>", file); break;
5077 case NE:
5078 fputs ("=", file); break;
5079 case GT:
5080 fputs ("<=", file); break;
5081 case GE:
5082 fputs ("<", file); break;
5083 case GEU:
5084 fputs ("<<", file); break;
5085 case GTU:
5086 fputs ("<<=", file); break;
5087 case LT:
5088 fputs (">=", file); break;
5089 case LE:
5090 fputs (">", file); break;
5091 case LEU:
5092 fputs (">>", file); break;
5093 case LTU:
5094 fputs (">>=", file); break;
5095 default:
5096 gcc_unreachable ();
5098 return;
5099 /* For floating point comparisons. Note that the output
5100 predicates are the complement of the desired mode. The
5101 conditions for GT, GE, LT, LE and LTGT cause an invalid
5102 operation exception if the result is unordered and this
5103 exception is enabled in the floating-point status register. */
5104 case 'Y':
5105 switch (GET_CODE (x))
5107 case EQ:
5108 fputs ("!=", file); break;
5109 case NE:
5110 fputs ("=", file); break;
5111 case GT:
5112 fputs ("!>", file); break;
5113 case GE:
5114 fputs ("!>=", file); break;
5115 case LT:
5116 fputs ("!<", file); break;
5117 case LE:
5118 fputs ("!<=", file); break;
5119 case LTGT:
5120 fputs ("!<>", file); break;
5121 case UNLE:
5122 fputs ("!?<=", file); break;
5123 case UNLT:
5124 fputs ("!?<", file); break;
5125 case UNGE:
5126 fputs ("!?>=", file); break;
5127 case UNGT:
5128 fputs ("!?>", file); break;
5129 case UNEQ:
5130 fputs ("!?=", file); break;
5131 case UNORDERED:
5132 fputs ("!?", file); break;
5133 case ORDERED:
5134 fputs ("?", file); break;
5135 default:
5136 gcc_unreachable ();
5138 return;
5139 case 'S': /* Condition, operands are (S)wapped. */
5140 switch (GET_CODE (x))
5142 case EQ:
5143 fputs ("=", file); break;
5144 case NE:
5145 fputs ("<>", file); break;
5146 case GT:
5147 fputs ("<", file); break;
5148 case GE:
5149 fputs ("<=", file); break;
5150 case GEU:
5151 fputs ("<<=", file); break;
5152 case GTU:
5153 fputs ("<<", file); break;
5154 case LT:
5155 fputs (">", file); break;
5156 case LE:
5157 fputs (">=", file); break;
5158 case LEU:
5159 fputs (">>=", file); break;
5160 case LTU:
5161 fputs (">>", file); break;
5162 default:
5163 gcc_unreachable ();
5165 return;
5166 case 'B': /* Condition, (B)oth swapped and negate. */
5167 switch (GET_CODE (x))
5169 case EQ:
5170 fputs ("<>", file); break;
5171 case NE:
5172 fputs ("=", file); break;
5173 case GT:
5174 fputs (">=", file); break;
5175 case GE:
5176 fputs (">", file); break;
5177 case GEU:
5178 fputs (">>", file); break;
5179 case GTU:
5180 fputs (">>=", file); break;
5181 case LT:
5182 fputs ("<=", file); break;
5183 case LE:
5184 fputs ("<", file); break;
5185 case LEU:
5186 fputs ("<<", file); break;
5187 case LTU:
5188 fputs ("<<=", file); break;
5189 default:
5190 gcc_unreachable ();
5192 return;
5193 case 'k':
5194 gcc_assert (GET_CODE (x) == CONST_INT);
5195 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5196 return;
5197 case 'Q':
5198 gcc_assert (GET_CODE (x) == CONST_INT);
5199 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5200 return;
5201 case 'L':
5202 gcc_assert (GET_CODE (x) == CONST_INT);
5203 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5204 return;
5205 case 'O':
5206 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5207 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5208 return;
5209 case 'p':
5210 gcc_assert (GET_CODE (x) == CONST_INT);
5211 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5212 return;
5213 case 'P':
5214 gcc_assert (GET_CODE (x) == CONST_INT);
5215 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5216 return;
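/* Worked examples for the codes above, taking x == 5: 'k' prints ~5,
   i.e. -6; 'Q' prints 64 - 5 == 59; 'L' prints 32 - 5 == 27; 'p' prints
   63 - 5 == 58; and 'P' prints 31 - 5 == 26.  For 'O', x == 8 prints
   exact_log2 (8) == 3.  */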
5217 case 'I':
5218 if (GET_CODE (x) == CONST_INT)
5219 fputs ("i", file);
5220 return;
5221 case 'M':
5222 case 'F':
5223 switch (GET_CODE (XEXP (x, 0)))
5225 case PRE_DEC:
5226 case PRE_INC:
5227 if (ASSEMBLER_DIALECT == 0)
5228 fputs ("s,mb", file);
5229 else
5230 fputs (",mb", file);
5231 break;
5232 case POST_DEC:
5233 case POST_INC:
5234 if (ASSEMBLER_DIALECT == 0)
5235 fputs ("s,ma", file);
5236 else
5237 fputs (",ma", file);
5238 break;
5239 case PLUS:
5240 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5241 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5243 if (ASSEMBLER_DIALECT == 0)
5244 fputs ("x", file);
5246 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5247 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5249 if (ASSEMBLER_DIALECT == 0)
5250 fputs ("x,s", file);
5251 else
5252 fputs (",s", file);
5254 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5255 fputs ("s", file);
5256 break;
5257 default:
5258 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5259 fputs ("s", file);
5260 break;
5262 return;
5263 case 'G':
5264 pa_output_global_address (file, x, 0);
5265 return;
5266 case 'H':
5267 pa_output_global_address (file, x, 1);
5268 return;
5269 case 0: /* Don't do anything special */
5270 break;
5271 case 'Z':
5273 unsigned op[3];
5274 compute_zdepwi_operands (INTVAL (x), op);
5275 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5276 return;
5278 case 'z':
5280 unsigned op[3];
5281 compute_zdepdi_operands (INTVAL (x), op);
5282 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5283 return;
5285 case 'c':
5286 /* We can get here from a .vtable_inherit due to our
5287 CONSTANT_ADDRESS_P rejecting perfectly good constant
5288 addresses. */
5289 break;
5290 default:
5291 gcc_unreachable ();
5293 if (GET_CODE (x) == REG)
5295 fputs (reg_names [REGNO (x)], file);
5296 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5298 fputs ("R", file);
5299 return;
5301 if (FP_REG_P (x)
5302 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5303 && (REGNO (x) & 1) == 0)
5304 fputs ("L", file);
5306 else if (GET_CODE (x) == MEM)
5308 int size = GET_MODE_SIZE (GET_MODE (x));
5309 rtx base = NULL_RTX;
5310 switch (GET_CODE (XEXP (x, 0)))
5312 case PRE_DEC:
5313 case POST_DEC:
5314 base = XEXP (XEXP (x, 0), 0);
5315 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5316 break;
5317 case PRE_INC:
5318 case POST_INC:
5319 base = XEXP (XEXP (x, 0), 0);
5320 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5321 break;
5322 case PLUS:
5323 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5324 fprintf (file, "%s(%s)",
5325 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5326 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5327 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5328 fprintf (file, "%s(%s)",
5329 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5330 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5331 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5332 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5334 /* Because the REG_POINTER flag can get lost during reload,
5335 pa_legitimate_address_p canonicalizes the order of the
5336 index and base registers in the combined move patterns. */
5337 rtx base = XEXP (XEXP (x, 0), 1);
5338 rtx index = XEXP (XEXP (x, 0), 0);
5340 fprintf (file, "%s(%s)",
5341 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5343 else
5344 output_address (XEXP (x, 0));
5345 break;
5346 default:
5347 output_address (XEXP (x, 0));
5348 break;
5351 else
5352 output_addr_const (file, x);
5355 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5357 void
5358 pa_output_global_address (FILE *file, rtx x, int round_constant)
5361 /* Imagine (high (const (plus ...))). */
5362 if (GET_CODE (x) == HIGH)
5363 x = XEXP (x, 0);
5365 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5366 output_addr_const (file, x);
5367 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5369 output_addr_const (file, x);
5370 fputs ("-$global$", file);
5372 else if (GET_CODE (x) == CONST)
5374 const char *sep = "";
5375 int offset = 0; /* assembler wants -$global$ at end */
5376 rtx base = NULL_RTX;
5378 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5380 case SYMBOL_REF:
5381 base = XEXP (XEXP (x, 0), 0);
5382 output_addr_const (file, base);
5383 break;
5384 case CONST_INT:
5385 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5386 break;
5387 default:
5388 gcc_unreachable ();
5391 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5393 case SYMBOL_REF:
5394 base = XEXP (XEXP (x, 0), 1);
5395 output_addr_const (file, base);
5396 break;
5397 case CONST_INT:
5398 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5399 break;
5400 default:
5401 gcc_unreachable ();
5404 /* How bogus. The compiler is apparently responsible for
5405 rounding the constant if it uses an LR field selector.
5407 The linker and/or assembler seem a better place since
5408 they have to do this kind of thing already.
5410 If we fail to do this, HP's optimizing linker may eliminate
5411 an addil, but not update the ldw/stw/ldo instruction that
5412 uses the result of the addil. */
5413 if (round_constant)
5414 offset = ((offset + 0x1000) & ~0x1fff);
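/* E.g. an offset of 0x1234 becomes (0x1234 + 0x1000) & ~0x1fff == 0x2000,
   the nearest multiple of 0x2000.  */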
5416 switch (GET_CODE (XEXP (x, 0)))
5418 case PLUS:
5419 if (offset < 0)
5421 offset = -offset;
5422 sep = "-";
5424 else
5425 sep = "+";
5426 break;
5428 case MINUS:
5429 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5430 sep = "-";
5431 break;
5433 default:
5434 gcc_unreachable ();
5437 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5438 fputs ("-$global$", file);
5439 if (offset)
5440 fprintf (file, "%s%d", sep, offset);
5442 else
5443 output_addr_const (file, x);
5446 /* Output boilerplate text to appear at the beginning of the file.
5447 There are several possible versions. */
5448 #define aputs(x) fputs(x, asm_out_file)
5449 static inline void
5450 pa_file_start_level (void)
5452 if (TARGET_64BIT)
5453 aputs ("\t.LEVEL 2.0w\n");
5454 else if (TARGET_PA_20)
5455 aputs ("\t.LEVEL 2.0\n");
5456 else if (TARGET_PA_11)
5457 aputs ("\t.LEVEL 1.1\n");
5458 else
5459 aputs ("\t.LEVEL 1.0\n");
5462 static inline void
5463 pa_file_start_space (int sortspace)
5465 aputs ("\t.SPACE $PRIVATE$");
5466 if (sortspace)
5467 aputs (",SORT=16");
5468 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5469 if (flag_tm)
5470 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5471 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5472 "\n\t.SPACE $TEXT$");
5473 if (sortspace)
5474 aputs (",SORT=8");
5475 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5476 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5479 static inline void
5480 pa_file_start_file (int want_version)
5482 if (write_symbols != NO_DEBUG)
5484 output_file_directive (asm_out_file, main_input_filename);
5485 if (want_version)
5486 aputs ("\t.version\t\"01.01\"\n");
5490 static inline void
5491 pa_file_start_mcount (const char *aswhat)
5493 if (profile_flag)
5494 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5497 static void
5498 pa_elf_file_start (void)
5500 pa_file_start_level ();
5501 pa_file_start_mcount ("ENTRY");
5502 pa_file_start_file (0);
5505 static void
5506 pa_som_file_start (void)
5508 pa_file_start_level ();
5509 pa_file_start_space (0);
5510 aputs ("\t.IMPORT $global$,DATA\n"
5511 "\t.IMPORT $$dyncall,MILLICODE\n");
5512 pa_file_start_mcount ("CODE");
5513 pa_file_start_file (0);
5516 static void
5517 pa_linux_file_start (void)
5519 pa_file_start_file (1);
5520 pa_file_start_level ();
5521 pa_file_start_mcount ("CODE");
5524 static void
5525 pa_hpux64_gas_file_start (void)
5527 pa_file_start_level ();
5528 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5529 if (profile_flag)
5530 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5531 #endif
5532 pa_file_start_file (1);
5535 static void
5536 pa_hpux64_hpas_file_start (void)
5538 pa_file_start_level ();
5539 pa_file_start_space (1);
5540 pa_file_start_mcount ("CODE");
5541 pa_file_start_file (0);
5543 #undef aputs
5545 /* Search the deferred plabel list for SYMBOL and return its internal
5546 label. If an entry for SYMBOL is not found, a new entry is created. */
5549 pa_get_deferred_plabel (rtx symbol)
5551 const char *fname = XSTR (symbol, 0);
5552 size_t i;
5554 /* See if we have already put this function on the list of deferred
5555 plabels. This list is generally small, so a linear search is not
5556 too ugly. If it proves too slow, replace it with something faster. */
5557 for (i = 0; i < n_deferred_plabels; i++)
5558 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5559 break;
5561 /* If the deferred plabel list is empty, or this entry was not found
5562 on the list, create a new entry on the list. */
5563 if (deferred_plabels == NULL || i == n_deferred_plabels)
5565 tree id;
5567 if (deferred_plabels == 0)
5568 deferred_plabels = ggc_alloc<deferred_plabel> ();
5569 else
5570 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5571 deferred_plabels,
5572 n_deferred_plabels + 1);
5574 i = n_deferred_plabels++;
5575 deferred_plabels[i].internal_label = gen_label_rtx ();
5576 deferred_plabels[i].symbol = symbol;
5578 /* Gross. We have just implicitly taken the address of this
5579 function. Mark it in the same manner as assemble_name. */
5580 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5581 if (id)
5582 mark_referenced (id);
5585 return deferred_plabels[i].internal_label;
5588 static void
5589 output_deferred_plabels (void)
5591 size_t i;
5593 /* If we have some deferred plabels, then we need to switch into the
5594 data or readonly data section, and align it to a 4 byte boundary
5595 before outputting the deferred plabels. */
5596 if (n_deferred_plabels)
5598 switch_to_section (flag_pic ? data_section : readonly_data_section);
5599 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5602 /* Now output the deferred plabels. */
5603 for (i = 0; i < n_deferred_plabels; i++)
5605 targetm.asm_out.internal_label (asm_out_file, "L",
5606 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5607 assemble_integer (deferred_plabels[i].symbol,
5608 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5612 /* Initialize optabs to point to emulation routines. */
5614 static void
5615 pa_init_libfuncs (void)
5617 if (HPUX_LONG_DOUBLE_LIBRARY)
5619 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5620 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5621 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5622 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5623 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5624 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5625 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5626 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5627 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5629 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5630 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5631 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5632 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5633 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5634 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5635 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5637 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5638 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5639 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5640 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5642 set_conv_libfunc (sfix_optab, SImode, TFmode,
5643 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5644 : "_U_Qfcnvfxt_quad_to_sgl");
5645 set_conv_libfunc (sfix_optab, DImode, TFmode,
5646 "_U_Qfcnvfxt_quad_to_dbl");
5647 set_conv_libfunc (ufix_optab, SImode, TFmode,
5648 "_U_Qfcnvfxt_quad_to_usgl");
5649 set_conv_libfunc (ufix_optab, DImode, TFmode,
5650 "_U_Qfcnvfxt_quad_to_udbl");
5652 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5653 "_U_Qfcnvxf_sgl_to_quad");
5654 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5655 "_U_Qfcnvxf_dbl_to_quad");
5656 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5657 "_U_Qfcnvxf_usgl_to_quad");
5658 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5659 "_U_Qfcnvxf_udbl_to_quad");
5662 if (TARGET_SYNC_LIBCALL)
5663 init_sync_libfuncs (UNITS_PER_WORD);
5666 /* HP's millicode routines mean something special to the assembler.
5667 Keep track of which ones we have used. */
5669 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5670 static void import_milli (enum millicodes);
5671 static char imported[(int) end1000];
5672 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5673 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5674 #define MILLI_START 10
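/* MILLI_START is the index of the "$$...." placeholder in import_string;
   e.g. import_milli (mulI) rewrites the template to
   ".IMPORT $$mulI,MILLICODE".  */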
5676 static void
5677 import_milli (enum millicodes code)
5679 char str[sizeof (import_string)];
5681 if (!imported[(int) code])
5683 imported[(int) code] = 1;
5684 strcpy (str, import_string);
5685 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5686 output_asm_insn (str, 0);
5690 /* The register constraints have put the operands and return value in
5691 the proper registers. */
5693 const char *
5694 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5696 import_milli (mulI);
5697 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5700 /* Emit the rtl for doing a division by a constant. */
5702 /* Do magic division millicodes exist for this value? */
5703 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
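/* Reading the table: a 1 at index n means the $$divI_n and $$divU_n
   millicodes exist, i.e. divisors 3, 5, 6, 7, 9, 10, 12, 14 and 15 get
   the special entry points.  */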
5705 /* We'll use an array to keep track of the magic millicodes and
5706 whether or not we've used them already. [n][0] is signed, [n][1] is
5707 unsigned. */
5709 static int div_milli[16][2];
5712 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5714 if (GET_CODE (operands[2]) == CONST_INT
5715 && INTVAL (operands[2]) > 0
5716 && INTVAL (operands[2]) < 16
5717 && pa_magic_milli[INTVAL (operands[2])])
5719 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5721 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5722 emit
5723 (gen_rtx_PARALLEL
5724 (VOIDmode,
5725 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5726 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5727 SImode,
5728 gen_rtx_REG (SImode, 26),
5729 operands[2])),
5730 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5731 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5732 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5733 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5734 gen_rtx_CLOBBER (VOIDmode, ret))));
5735 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5736 return 1;
5738 return 0;
5741 const char *
5742 pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
5744 int divisor;
5746 /* If the divisor is a constant, try to use one of the special
5747 opcodes. */
5748 if (GET_CODE (operands[0]) == CONST_INT)
5750 static char buf[100];
5751 divisor = INTVAL (operands[0]);
5752 if (!div_milli[divisor][unsignedp])
5754 div_milli[divisor][unsignedp] = 1;
5755 if (unsignedp)
5756 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5757 else
5758 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5760 if (unsignedp)
5762 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5763 INTVAL (operands[0]));
5764 return pa_output_millicode_call (insn,
5765 gen_rtx_SYMBOL_REF (SImode, buf));
5767 else
5769 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5770 INTVAL (operands[0]));
5771 return pa_output_millicode_call (insn,
5772 gen_rtx_SYMBOL_REF (SImode, buf));
5775 /* Divisor isn't a special constant. */
5776 else
5778 if (unsignedp)
5780 import_milli (divU);
5781 return pa_output_millicode_call (insn,
5782 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5784 else
5786 import_milli (divI);
5787 return pa_output_millicode_call (insn,
5788 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5793 /* Output a $$rem millicode to do mod. */
5795 const char *
5796 pa_output_mod_insn (int unsignedp, rtx insn)
5798 if (unsignedp)
5800 import_milli (remU);
5801 return pa_output_millicode_call (insn,
5802 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5804 else
5806 import_milli (remI);
5807 return pa_output_millicode_call (insn,
5808 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5812 void
5813 pa_output_arg_descriptor (rtx call_insn)
5815 const char *arg_regs[4];
5816 enum machine_mode arg_mode;
5817 rtx link;
5818 int i, output_flag = 0;
5819 int regno;
5821 /* We neither need nor want argument location descriptors for the
5822 64-bit runtime environment or the ELF32 environment. */
5823 if (TARGET_64BIT || TARGET_ELF32)
5824 return;
5826 for (i = 0; i < 4; i++)
5827 arg_regs[i] = 0;
5829 /* Specify explicitly that no argument relocations should take place
5830 if using the portable runtime calling conventions. */
5831 if (TARGET_PORTABLE_RUNTIME)
5833 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5834 asm_out_file);
5835 return;
5838 gcc_assert (CALL_P (call_insn));
5839 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5840 link; link = XEXP (link, 1))
5842 rtx use = XEXP (link, 0);
5844 if (! (GET_CODE (use) == USE
5845 && GET_CODE (XEXP (use, 0)) == REG
5846 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5847 continue;
5849 arg_mode = GET_MODE (XEXP (use, 0));
5850 regno = REGNO (XEXP (use, 0));
5851 if (regno >= 23 && regno <= 26)
5853 arg_regs[26 - regno] = "GR";
5854 if (arg_mode == DImode)
5855 arg_regs[25 - regno] = "GR";
5857 else if (regno >= 32 && regno <= 39)
5859 if (arg_mode == SFmode)
5860 arg_regs[(regno - 32) / 2] = "FR";
5861 else
5863 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5864 arg_regs[(regno - 34) / 2] = "FR";
5865 arg_regs[(regno - 34) / 2 + 1] = "FU";
5866 #else
5867 arg_regs[(regno - 34) / 2] = "FU";
5868 arg_regs[(regno - 34) / 2 + 1] = "FR";
5869 #endif
5873 fputs ("\t.CALL ", asm_out_file);
5874 for (i = 0; i < 4; i++)
5876 if (arg_regs[i])
5878 if (output_flag++)
5879 fputc (',', asm_out_file);
5880 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5883 fputc ('\n', asm_out_file);
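/* Example of the output (illustrative): a 32-bit call passing two
   ints in %r26 and %r25 reaches the loop above with arg_regs[0] and
   arg_regs[1] set to "GR", and emits

       .CALL ARGW0=GR,ARGW1=GR

   telling the HP linker which argument words might need relocation
   between the general and floating-point registers.  */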
5886 /* Inform reload about cases where moving X with a mode MODE to or from
5887 a register in RCLASS requires an extra scratch or immediate register.
5888 Return the class needed for the immediate register. */
5890 static reg_class_t
5891 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5892 enum machine_mode mode, secondary_reload_info *sri)
5894 int regno;
5895 enum reg_class rclass = (enum reg_class) rclass_i;
5897 /* Handle the easy stuff first. */
5898 if (rclass == R1_REGS)
5899 return NO_REGS;
5901 if (REG_P (x))
5903 regno = REGNO (x);
5904 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5905 return NO_REGS;
5907 else
5908 regno = -1;
5910 /* If we have something like (mem (mem (...))), we can safely assume the
5911 inner MEM will end up in a general register after reloading, so there's
5912 no need for a secondary reload. */
5913 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5914 return NO_REGS;
5916 /* Trying to load a constant into a FP register during PIC code
5917 generation requires %r1 as a scratch register. For float modes,
5918 the only legitimate constant is CONST0_RTX. However, there are
5919 a few patterns that accept constant double operands. */
5920 if (flag_pic
5921 && FP_REG_CLASS_P (rclass)
5922 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5924 switch (mode)
5926 case SImode:
5927 sri->icode = CODE_FOR_reload_insi_r1;
5928 break;
5930 case DImode:
5931 sri->icode = CODE_FOR_reload_indi_r1;
5932 break;
5934 case SFmode:
5935 sri->icode = CODE_FOR_reload_insf_r1;
5936 break;
5938 case DFmode:
5939 sri->icode = CODE_FOR_reload_indf_r1;
5940 break;
5942 default:
5943 gcc_unreachable ();
5945 return NO_REGS;
5948 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5949 register when we're generating PIC code or when the operand isn't
5950 readonly. */
5951 if (pa_symbolic_expression_p (x))
5953 if (GET_CODE (x) == HIGH)
5954 x = XEXP (x, 0);
5956 if (flag_pic || !read_only_operand (x, VOIDmode))
5958 switch (mode)
5960 case SImode:
5961 sri->icode = CODE_FOR_reload_insi_r1;
5962 break;
5964 case DImode:
5965 sri->icode = CODE_FOR_reload_indi_r1;
5966 break;
5968 default:
5969 gcc_unreachable ();
5971 return NO_REGS;
5975 /* Profiling showed the PA port spends about 1.3% of its compilation
5976 time in true_regnum from calls inside pa_secondary_reload_class. */
5977 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5978 regno = true_regnum (x);
5980 /* Handle reloads for floating point loads and stores. */
5981 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5982 && FP_REG_CLASS_P (rclass))
5984 if (MEM_P (x))
5986 x = XEXP (x, 0);
5988 /* We don't need an intermediate for indexed and LO_SUM DLT
5989 memory addresses. When INT14_OK_STRICT is true, it might
5990 appear that we could directly allow register indirect
5991 memory addresses. However, this doesn't work because we
5992 don't support SUBREGs in floating-point register copies
5993 and reload doesn't tell us when it's going to use a SUBREG. */
5994 if (IS_INDEX_ADDR_P (x)
5995 || IS_LO_SUM_DLT_ADDR_P (x))
5996 return NO_REGS;
5998 /* Request intermediate general register. */
5999 return GENERAL_REGS;
6002 /* Request a secondary reload with a general scratch register
6003 for everything else. ??? Could symbolic operands be handled
6004 directly when generating non-pic PA 2.0 code? */
6005 sri->icode = (in_p
6006 ? direct_optab_handler (reload_in_optab, mode)
6007 : direct_optab_handler (reload_out_optab, mode));
6008 return NO_REGS;
6011 /* A SAR<->FP register copy requires an intermediate general register
6012 and secondary memory. We need a secondary reload with a general
6013 scratch register for spills. */
6014 if (rclass == SHIFT_REGS)
6016 /* Handle spill. */
6017 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6019 sri->icode = (in_p
6020 ? direct_optab_handler (reload_in_optab, mode)
6021 : direct_optab_handler (reload_out_optab, mode));
6022 return NO_REGS;
6025 /* Handle FP copy. */
6026 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6027 return GENERAL_REGS;
6030 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6031 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6032 && FP_REG_CLASS_P (rclass))
6033 return GENERAL_REGS;
6035 return NO_REGS;
6038 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6039 is only marked as live on entry by df-scan when it is a fixed
6040 register. It isn't a fixed register in the 64-bit runtime,
6041 so we need to mark it here. */
6043 static void
6044 pa_extra_live_on_entry (bitmap regs)
6046 if (TARGET_64BIT)
6047 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6050 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6051 to prevent it from being deleted. */
6053 rtx
6054 pa_eh_return_handler_rtx (void)
6056 rtx tmp;
6058 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6059 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6060 tmp = gen_rtx_MEM (word_mode, tmp);
6061 tmp->volatil = 1;
6062 return tmp;
6065 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6066 by invisible reference. As a GCC extension, we also pass anything
6067 with a zero or variable size by reference.
6069 The 64-bit runtime does not describe passing any types by invisible
6070 reference. The internals of GCC can't currently handle passing
6071 empty structures, and zero or variable length arrays when they are
6072 not passed entirely on the stack or by reference. Thus, as a GCC
6073 extension, we pass these types by reference. The HP compiler doesn't
6074 support these types, so hopefully there shouldn't be any compatibility
6075 issues. This may have to be revisited when HP releases a C99 compiler
6076 or updates the ABI. */
6078 static bool
6079 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6080 enum machine_mode mode, const_tree type,
6081 bool named ATTRIBUTE_UNUSED)
6083 HOST_WIDE_INT size;
6085 if (type)
6086 size = int_size_in_bytes (type);
6087 else
6088 size = GET_MODE_SIZE (mode);
6090 if (TARGET_64BIT)
6091 return size <= 0;
6092 else
6093 return size <= 0 || size > 8;
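/* Concrete consequence (illustrative): in the 32-bit runtime a
   12-byte struct is passed by invisible reference (size > 8), while
   an 8-byte struct is passed by value; in the 64-bit runtime only
   zero- or variable-sized types (size <= 0 above) go by reference.  */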
6096 enum direction
6097 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6099 if (mode == BLKmode
6100 || (TARGET_64BIT
6101 && type
6102 && (AGGREGATE_TYPE_P (type)
6103 || TREE_CODE (type) == COMPLEX_TYPE
6104 || TREE_CODE (type) == VECTOR_TYPE)))
6106 /* Return none if justification is not required. */
6107 if (type
6108 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6109 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6110 return none;
6112 /* The directions set here are ignored when a BLKmode argument larger
6113 than a word is placed in a register. Different code is used for
6114 the stack and registers. This makes it difficult to have a
6115 consistent data representation for both the stack and registers.
6116 For both runtimes, the justification and padding for arguments on
6117 the stack and in registers should be identical. */
6118 if (TARGET_64BIT)
6119 /* The 64-bit runtime specifies left justification for aggregates. */
6120 return upward;
6121 else
6122 /* The 32-bit runtime architecture specifies right justification.
6123 When the argument is passed on the stack, the argument is padded
6124 with garbage on the left. The HP compiler pads with zeros. */
6125 return downward;
6128 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6129 return downward;
6130 else
6131 return none;
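/* Illustration: a 3-byte struct (BLKmode) in the 32-bit runtime gets
   `downward' padding, i.e. the value is right justified in its word
   with garbage bits on the left, whereas the 64-bit runtime would
   left justify (`upward') the same aggregate.  */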
6135 /* Do what is necessary for `va_start'. We look at the current function
6136 to determine if stdargs or varargs is used and fill in an initial
6137 va_list. A pointer to this constructor is returned. */
6139 static rtx
6140 hppa_builtin_saveregs (void)
6142 rtx offset, dest;
6143 tree fntype = TREE_TYPE (current_function_decl);
6144 int argadj = ((!stdarg_p (fntype))
6145 ? UNITS_PER_WORD : 0);
6147 if (argadj)
6148 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6149 else
6150 offset = crtl->args.arg_offset_rtx;
6152 if (TARGET_64BIT)
6154 int i, off;
6156 /* Adjust for varargs/stdarg differences. */
6157 if (argadj)
6158 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6159 else
6160 offset = crtl->args.arg_offset_rtx;
6162 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6163 from the incoming arg pointer and growing to larger addresses. */
6164 for (i = 26, off = -64; i >= 19; i--, off += 8)
6165 emit_move_insn (gen_rtx_MEM (word_mode,
6166 plus_constant (Pmode,
6167 arg_pointer_rtx, off)),
6168 gen_rtx_REG (word_mode, i));
6170 /* The incoming args pointer points just beyond the flushback area;
6171 normally this is not a serious concern. However, when we are doing
6172 varargs/stdargs we want to make the arg pointer point to the start
6173 of the incoming argument area. */
6174 emit_move_insn (virtual_incoming_args_rtx,
6175 plus_constant (Pmode, arg_pointer_rtx, -64));
6177 /* Now return a pointer to the first anonymous argument. */
6178 return copy_to_reg (expand_binop (Pmode, add_optab,
6179 virtual_incoming_args_rtx,
6180 offset, 0, 0, OPTAB_LIB_WIDEN));
6183 /* Store general registers on the stack. */
6184 dest = gen_rtx_MEM (BLKmode,
6185 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6186 -16));
6187 set_mem_alias_set (dest, get_varargs_alias_set ());
6188 set_mem_align (dest, BITS_PER_WORD);
6189 move_block_from_reg (23, dest, 4);
6191 /* move_block_from_reg will emit code to store the argument registers
6192 individually as scalar stores.
6194 However, other insns may later load from the same addresses for
6195 a structure load (passing a struct to a varargs routine).
6197 The alias code assumes that such aliasing can never happen, so we
6198 have to keep memory referencing insns from moving up beyond the
6199 last argument register store. So we emit a blockage insn here. */
6200 emit_insn (gen_blockage ());
6202 return copy_to_reg (expand_binop (Pmode, add_optab,
6203 crtl->args.internal_arg_pointer,
6204 offset, 0, 0, OPTAB_LIB_WIDEN));
6207 static void
6208 hppa_va_start (tree valist, rtx nextarg)
6210 nextarg = expand_builtin_saveregs ();
6211 std_expand_builtin_va_start (valist, nextarg);
6214 static tree
6215 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6216 gimple_seq *post_p)
6218 if (TARGET_64BIT)
6220 /* Args grow upward. We can use the generic routines. */
6221 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6223 else /* !TARGET_64BIT */
6225 tree ptr = build_pointer_type (type);
6226 tree valist_type;
6227 tree t, u;
6228 unsigned int size, ofs;
6229 bool indirect;
6231 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6232 if (indirect)
6234 type = ptr;
6235 ptr = build_pointer_type (type);
6237 size = int_size_in_bytes (type);
6238 valist_type = TREE_TYPE (valist);
6240 /* Args grow down. Not handled by generic routines. */
6242 u = fold_convert (sizetype, size_in_bytes (type));
6243 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6244 t = fold_build_pointer_plus (valist, u);
6246 /* Align to 4 or 8 byte boundary depending on argument size. */
6248 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6249 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6250 t = fold_convert (valist_type, t);
6252 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6254 ofs = (8 - size) % 4;
6255 if (ofs != 0)
6256 t = fold_build_pointer_plus_hwi (t, ofs);
6258 t = fold_convert (ptr, t);
6259 t = build_va_arg_indirect_ref (t);
6261 if (indirect)
6262 t = build_va_arg_indirect_ref (t);
6264 return t;
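/* Worked example (illustrative) for the 32-bit path above, fetching a
   2-byte short: t = valist - 2, the BIT_AND with -4 rounds t down to
   a 4-byte slot, and ofs = (8 - 2) % 4 = 2 advances the pointer so
   the short is read from the last two bytes of the slot.  This
   matches the downward (right justified) padding used for small
   arguments in the 32-bit runtime.  */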
6268 /* True if MODE is valid for the target. By "valid", we mean able to
6269 be manipulated in non-trivial ways. In particular, this means all
6270 the arithmetic is supported.
6272 Currently, TImode is not valid as the HP 64-bit runtime documentation
6273 doesn't document the alignment and calling conventions for this type.
6274 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6275 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6277 static bool
6278 pa_scalar_mode_supported_p (enum machine_mode mode)
6280 int precision = GET_MODE_PRECISION (mode);
6282 switch (GET_MODE_CLASS (mode))
6284 case MODE_PARTIAL_INT:
6285 case MODE_INT:
6286 if (precision == CHAR_TYPE_SIZE)
6287 return true;
6288 if (precision == SHORT_TYPE_SIZE)
6289 return true;
6290 if (precision == INT_TYPE_SIZE)
6291 return true;
6292 if (precision == LONG_TYPE_SIZE)
6293 return true;
6294 if (precision == LONG_LONG_TYPE_SIZE)
6295 return true;
6296 return false;
6298 case MODE_FLOAT:
6299 if (precision == FLOAT_TYPE_SIZE)
6300 return true;
6301 if (precision == DOUBLE_TYPE_SIZE)
6302 return true;
6303 if (precision == LONG_DOUBLE_TYPE_SIZE)
6304 return true;
6305 return false;
6307 case MODE_DECIMAL_FLOAT:
6308 return false;
6310 default:
6311 gcc_unreachable ();
6315 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6316 it branches into the delay slot. Otherwise, return FALSE. */
6318 static bool
6319 branch_to_delay_slot_p (rtx insn)
6321 rtx jump_insn;
6323 if (dbr_sequence_length ())
6324 return FALSE;
6326 jump_insn = next_active_insn (JUMP_LABEL (insn));
6327 while (insn)
6329 insn = next_active_insn (insn);
6330 if (jump_insn == insn)
6331 return TRUE;
6333 /* We can't rely on the length of asms. So, we return FALSE when
6334 the branch is followed by an asm. */
6335 if (!insn
6336 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6337 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6338 || get_attr_length (insn) > 0)
6339 break;
6342 return FALSE;
6345 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6347 This occurs when INSN has an unfilled delay slot and is followed
6348 by an asm. Disaster can occur if the asm is empty and the jump
6349 branches into the delay slot. So, we add a nop in the delay slot
6350 when this occurs. */
6352 static bool
6353 branch_needs_nop_p (rtx insn)
6355 rtx jump_insn;
6357 if (dbr_sequence_length ())
6358 return FALSE;
6360 jump_insn = next_active_insn (JUMP_LABEL (insn));
6361 while (insn)
6363 insn = next_active_insn (insn);
6364 if (!insn || jump_insn == insn)
6365 return TRUE;
6367 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6368 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6369 && get_attr_length (insn) > 0)
6370 break;
6373 return FALSE;
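/* Example of the hazard (illustrative): an empty `asm ("")' emits no
   instructions and so has zero length.  A branch whose target
   immediately follows such an asm could effectively branch into its
   own delay slot; the loop above detects this case so that a nop can
   be added.  */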
6376 /* Return TRUE if INSN, a forward jump insn, can use nullification
6377 to skip the following instruction. This avoids an extra cycle due
6378 to a mis-predicted branch when we fall through. */
6380 static bool
6381 use_skip_p (rtx insn)
6383 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6385 while (insn)
6387 insn = next_active_insn (insn);
6389 /* We can't rely on the length of asms, so we can't skip asms. */
6390 if (!insn
6391 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6392 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6393 break;
6394 if (get_attr_length (insn) == 4
6395 && jump_insn == next_active_insn (insn))
6396 return TRUE;
6397 if (get_attr_length (insn) > 0)
6398 break;
6401 return FALSE;
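/* Sketch of the transformation (illustrative registers): a forward
   branch over a single insn,

       comb,<   %r26,%r25,L$over
       nop
       insn
   L$over:

   can instead be emitted as a compare-and-nullify,

       comclr,< %r26,%r25,%r0    ; nullifies `insn' when %r26 < %r25
       insn

   avoiding the branch and its misprediction penalty entirely.  */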
6404 /* This routine handles all the normal conditional branch sequences we
6405 might need to generate. It handles compare immediate vs compare
6406 register, nullification of delay slots, varying length branches,
6407 negated branches, and all combinations of the above. It returns the
6408 output appropriate to emit the branch corresponding to all given
6409 parameters. */
6411 const char *
6412 pa_output_cbranch (rtx *operands, int negated, rtx insn)
6414 static char buf[100];
6415 bool useskip;
6416 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6417 int length = get_attr_length (insn);
6418 int xdelay;
6420 /* A conditional branch to the following instruction (e.g. the delay slot)
6421 is asking for a disaster. This can happen when not optimizing and
6422 when jump optimization fails.
6424 While it is usually safe to emit nothing, this can fail if the
6425 preceding instruction is a nullified branch with an empty delay
6426 slot and the same branch target as this branch. We could check
6427 for this but jump optimization should eliminate nop jumps. It
6428 is always safe to emit a nop. */
6429 if (branch_to_delay_slot_p (insn))
6430 return "nop";
6432 /* The doubleword form of the cmpib instruction doesn't have the LEU
6433 and GTU conditions while the cmpb instruction does. Since we accept
6434 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6435 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6436 operands[2] = gen_rtx_REG (DImode, 0);
6437 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6438 operands[1] = gen_rtx_REG (DImode, 0);
6440 /* If this is a long branch with its delay slot unfilled, set `nullify'
6441 as it can nullify the delay slot and save a nop. */
6442 if (length == 8 && dbr_sequence_length () == 0)
6443 nullify = 1;
6445 /* If this is a short forward conditional branch which did not get
6446 its delay slot filled, the delay slot can still be nullified. */
6447 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6448 nullify = forward_branch_p (insn);
6450 /* A forward branch over a single nullified insn can be done with a
6451 comclr instruction. This avoids a single cycle penalty due to
6452 mis-predicted branch if we fall through (branch not taken). */
6453 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6455 switch (length)
6457 /* All short conditional branches except backwards with an unfilled
6458 delay slot. */
6459 case 4:
6460 if (useskip)
6461 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6462 else
6463 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6464 if (GET_MODE (operands[1]) == DImode)
6465 strcat (buf, "*");
6466 if (negated)
6467 strcat (buf, "%B3");
6468 else
6469 strcat (buf, "%S3");
6470 if (useskip)
6471 strcat (buf, " %2,%r1,%%r0");
6472 else if (nullify)
6474 if (branch_needs_nop_p (insn))
6475 strcat (buf, ",n %2,%r1,%0%#");
6476 else
6477 strcat (buf, ",n %2,%r1,%0");
6479 else
6480 strcat (buf, " %2,%r1,%0");
6481 break;
6483 /* All long conditionals. Note a short backward branch with an
6484 unfilled delay slot is treated just like a long backward branch
6485 with an unfilled delay slot. */
6486 case 8:
6487 /* Handle weird backwards branch with a filled delay slot
6488 which is nullified. */
6489 if (dbr_sequence_length () != 0
6490 && ! forward_branch_p (insn)
6491 && nullify)
6493 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6494 if (GET_MODE (operands[1]) == DImode)
6495 strcat (buf, "*");
6496 if (negated)
6497 strcat (buf, "%S3");
6498 else
6499 strcat (buf, "%B3");
6500 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6502 /* Handle short backwards branch with an unfilled delay slot.
6503 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6504 taken and untaken branches. */
6505 else if (dbr_sequence_length () == 0
6506 && ! forward_branch_p (insn)
6507 && INSN_ADDRESSES_SET_P ()
6508 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6509 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6511 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6512 if (GET_MODE (operands[1]) == DImode)
6513 strcat (buf, "*");
6514 if (negated)
6515 strcat (buf, "%B3 %2,%r1,%0%#");
6516 else
6517 strcat (buf, "%S3 %2,%r1,%0%#");
6519 else
6521 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6522 if (GET_MODE (operands[1]) == DImode)
6523 strcat (buf, "*");
6524 if (negated)
6525 strcat (buf, "%S3");
6526 else
6527 strcat (buf, "%B3");
6528 if (nullify)
6529 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6530 else
6531 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6533 break;
6535 default:
6536 /* The reversed conditional branch must branch over one additional
6537 instruction if the delay slot is filled and needs to be extracted
6538 by pa_output_lbranch. If the delay slot is empty or this is a
6539 nullified forward branch, the instruction after the reversed
6540 conditional branch must be nullified. */
6541 if (dbr_sequence_length () == 0
6542 || (nullify && forward_branch_p (insn)))
6544 nullify = 1;
6545 xdelay = 0;
6546 operands[4] = GEN_INT (length);
6548 else
6550 xdelay = 1;
6551 operands[4] = GEN_INT (length + 4);
6554 /* Create a reversed conditional branch which branches around
6555 the following insns. */
6556 if (GET_MODE (operands[1]) != DImode)
6558 if (nullify)
6560 if (negated)
6561 strcpy (buf,
6562 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6563 else
6564 strcpy (buf,
6565 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6567 else
6569 if (negated)
6570 strcpy (buf,
6571 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6572 else
6573 strcpy (buf,
6574 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6577 else
6579 if (nullify)
6581 if (negated)
6582 strcpy (buf,
6583 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6584 else
6585 strcpy (buf,
6586 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6588 else
6590 if (negated)
6591 strcpy (buf,
6592 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6593 else
6594 strcpy (buf,
6595 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6599 output_asm_insn (buf, operands);
6600 return pa_output_lbranch (operands[0], insn, xdelay);
6602 return buf;
6605 /* This routine handles output of long unconditional branches that
6606 exceed the maximum range of a simple branch instruction. Since
6607 we don't have a register available for the branch, we save register
6608 %r1 in the frame marker, load the branch destination DEST into %r1,
6609 execute the branch, and restore %r1 in the delay slot of the branch.
6611 Since long branches may have an insn in the delay slot and the
6612 delay slot is used to restore %r1, we in general need to extract
6613 this insn and execute it before the branch. However, to facilitate
6614 use of this function by conditional branches, we also provide an
6615 option to not extract the delay insn so that it will be emitted
6616 after the long branch. So, if there is an insn in the delay slot,
6617 it is extracted if XDELAY is nonzero.
6619 The lengths of the various long-branch sequences are 20, 16 and 24
6620 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
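/* For reference (illustrative), the 16-byte non-PIC 32-bit sequence
   generated below looks like

       stw  %r1,-20(%r30)       ; save %r1 in the frame marker
       ldil L'target,%r1        ; load the high part of the target
       be   R'target(%sr4,%r1)  ; interspace branch through %r1
       ldw  -20(%r30),%r1       ; delay slot: restore %r1

   with the portable-runtime and PIC variants differing mainly in how
   the target address is formed in %r1 and which branch is used.  */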
6622 const char *
6623 pa_output_lbranch (rtx dest, rtx insn, int xdelay)
6625 rtx xoperands[2];
6627 xoperands[0] = dest;
6629 /* First, free up the delay slot. */
6630 if (xdelay && dbr_sequence_length () != 0)
6632 /* We can't handle a jump in the delay slot. */
6633 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6635 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6636 optimize, 0, NULL);
6638 /* Now delete the delay insn. */
6639 SET_INSN_DELETED (NEXT_INSN (insn));
6642 /* Output an insn to save %r1. The runtime documentation doesn't
6643 specify whether the "Clean Up" slot in the caller's frame can
6644 be clobbered by the callee. It isn't copied by HP's builtin
6645 alloca, so this suggests that it can be clobbered if necessary.
6646 The "Static Link" location is copied by HP builtin alloca, so
6647 we avoid using it. Using the cleanup slot might be a problem
6648 if we have to interoperate with languages that pass cleanup
6649 information. However, it should be possible to handle these
6650 situations with GCC's asm feature.
6652 The "Current RP" slot is reserved for the called procedure, so
6653 we try to use it when we don't have a frame of our own. It's
6654 rather unlikely that we won't have a frame when we need to emit
6655 a very long branch.
6657 Really the way to go long term is a register scavenger; goto
6658 the target of the jump and find a register which we can use
6659 as a scratch to hold the value in %r1. Then, we wouldn't have
6660 to free up the delay slot or clobber a slot that may be needed
6661 for other purposes. */
6662 if (TARGET_64BIT)
6664 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6665 /* Use the return pointer slot in the frame marker. */
6666 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6667 else
6668 /* Use the slot at -40 in the frame marker since HP builtin
6669 alloca doesn't copy it. */
6670 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6672 else
6674 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6675 /* Use the return pointer slot in the frame marker. */
6676 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6677 else
6678 /* Use the "Clean Up" slot in the frame marker. In GCC,
6679 the only other use of this location is for copying a
6680 floating point double argument from a floating-point
6681 register to two general registers. The copy is done
6682 as an "atomic" operation when outputting a call, so it
6683 won't interfere with our using the location here. */
6684 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6687 if (TARGET_PORTABLE_RUNTIME)
6689 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6690 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6691 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6693 else if (flag_pic)
6695 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6696 if (TARGET_SOM || !TARGET_GAS)
6698 xoperands[1] = gen_label_rtx ();
6699 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6700 targetm.asm_out.internal_label (asm_out_file, "L",
6701 CODE_LABEL_NUMBER (xoperands[1]));
6702 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6704 else
6706 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6707 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6709 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6711 else
6712 /* Now output a very long branch to the original target. */
6713 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6715 /* Now restore the value of %r1 in the delay slot. */
6716 if (TARGET_64BIT)
6718 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6719 return "ldd -16(%%r30),%%r1";
6720 else
6721 return "ldd -40(%%r30),%%r1";
6723 else
6725 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6726 return "ldw -20(%%r30),%%r1";
6727 else
6728 return "ldw -12(%%r30),%%r1";
6732 /* This routine handles all the branch-on-bit conditional branch sequences we
6733 might need to generate. It handles nullification of delay slots,
6734 varying length branches, negated branches and all combinations of the
6735 above. It returns the appropriate output template to emit the branch. */
6737 const char *
6738 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6740 static char buf[100];
6741 bool useskip;
6742 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6743 int length = get_attr_length (insn);
6744 int xdelay;
6746 /* A conditional branch to the following instruction (e.g. the delay slot) is
6747 asking for a disaster. I do not think this can happen as this pattern
6748 is only used when optimizing; jump optimization should eliminate the
6749 jump. But be prepared just in case. */
6751 if (branch_to_delay_slot_p (insn))
6752 return "nop";
6754 /* If this is a long branch with its delay slot unfilled, set `nullify'
6755 as it can nullify the delay slot and save a nop. */
6756 if (length == 8 && dbr_sequence_length () == 0)
6757 nullify = 1;
6759 /* If this is a short forward conditional branch which did not get
6760 its delay slot filled, the delay slot can still be nullified. */
6761 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6762 nullify = forward_branch_p (insn);
6764 /* A forward branch over a single nullified insn can be done with an
6765 extrs instruction. This avoids a single cycle penalty due to
6766 mis-predicted branch if we fall through (branch not taken). */
6767 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6769 switch (length)
6772 /* All short conditional branches except backwards with an unfilled
6773 delay slot. */
6774 case 4:
6775 if (useskip)
6776 strcpy (buf, "{extrs,|extrw,s,}");
6777 else
6778 strcpy (buf, "bb,");
6779 if (useskip && GET_MODE (operands[0]) == DImode)
6780 strcpy (buf, "extrd,s,*");
6781 else if (GET_MODE (operands[0]) == DImode)
6782 strcpy (buf, "bb,*");
6783 if ((which == 0 && negated)
6784 || (which == 1 && ! negated))
6785 strcat (buf, ">=");
6786 else
6787 strcat (buf, "<");
6788 if (useskip)
6789 strcat (buf, " %0,%1,1,%%r0");
6790 else if (nullify && negated)
6792 if (branch_needs_nop_p (insn))
6793 strcat (buf, ",n %0,%1,%3%#");
6794 else
6795 strcat (buf, ",n %0,%1,%3");
6797 else if (nullify && ! negated)
6799 if (branch_needs_nop_p (insn))
6800 strcat (buf, ",n %0,%1,%2%#");
6801 else
6802 strcat (buf, ",n %0,%1,%2");
6804 else if (! nullify && negated)
6805 strcat (buf, " %0,%1,%3");
6806 else if (! nullify && ! negated)
6807 strcat (buf, " %0,%1,%2");
6808 break;
6810 /* All long conditionals. Note a short backward branch with an
6811 unfilled delay slot is treated just like a long backward branch
6812 with an unfilled delay slot. */
6813 case 8:
6814 /* Handle weird backwards branch with a filled delay slot
6815 which is nullified. */
6816 if (dbr_sequence_length () != 0
6817 && ! forward_branch_p (insn)
6818 && nullify)
6820 strcpy (buf, "bb,");
6821 if (GET_MODE (operands[0]) == DImode)
6822 strcat (buf, "*");
6823 if ((which == 0 && negated)
6824 || (which == 1 && ! negated))
6825 strcat (buf, "<");
6826 else
6827 strcat (buf, ">=");
6828 if (negated)
6829 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6830 else
6831 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6833 /* Handle short backwards branch with an unfilled delay slot.
6834 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6835 taken and untaken branches. */
6836 else if (dbr_sequence_length () == 0
6837 && ! forward_branch_p (insn)
6838 && INSN_ADDRESSES_SET_P ()
6839 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6840 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6842 strcpy (buf, "bb,");
6843 if (GET_MODE (operands[0]) == DImode)
6844 strcat (buf, "*");
6845 if ((which == 0 && negated)
6846 || (which == 1 && ! negated))
6847 strcat (buf, ">=");
6848 else
6849 strcat (buf, "<");
6850 if (negated)
6851 strcat (buf, " %0,%1,%3%#");
6852 else
6853 strcat (buf, " %0,%1,%2%#");
6855 else
6857 if (GET_MODE (operands[0]) == DImode)
6858 strcpy (buf, "extrd,s,*");
6859 else
6860 strcpy (buf, "{extrs,|extrw,s,}");
6861 if ((which == 0 && negated)
6862 || (which == 1 && ! negated))
6863 strcat (buf, "<");
6864 else
6865 strcat (buf, ">=");
6866 if (nullify && negated)
6867 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6868 else if (nullify && ! negated)
6869 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6870 else if (negated)
6871 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6872 else
6873 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6875 break;
6877 default:
6878 /* The reversed conditional branch must branch over one additional
6879 instruction if the delay slot is filled and needs to be extracted
6880 by pa_output_lbranch. If the delay slot is empty or this is a
6881 nullified forward branch, the instruction after the reversed
6882 conditional branch must be nullified. */
6883 if (dbr_sequence_length () == 0
6884 || (nullify && forward_branch_p (insn)))
6886 nullify = 1;
6887 xdelay = 0;
6888 operands[4] = GEN_INT (length);
6890 else
6892 xdelay = 1;
6893 operands[4] = GEN_INT (length + 4);
6896 if (GET_MODE (operands[0]) == DImode)
6897 strcpy (buf, "bb,*");
6898 else
6899 strcpy (buf, "bb,");
6900 if ((which == 0 && negated)
6901 || (which == 1 && !negated))
6902 strcat (buf, "<");
6903 else
6904 strcat (buf, ">=");
6905 if (nullify)
6906 strcat (buf, ",n %0,%1,.+%4");
6907 else
6908 strcat (buf, " %0,%1,.+%4");
6909 output_asm_insn (buf, operands);
6910 return pa_output_lbranch (negated ? operands[3] : operands[2],
6911 insn, xdelay);
6913 return buf;
6916 /* This routine handles all the branch-on-variable-bit conditional branch
6917 sequences we might need to generate. It handles nullification of delay
6918 slots, varying length branches, negated branches and all combinations
6919 of the above. It returns the appropriate output template to emit the
6920 branch. */
6922 const char *
6923 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
6924 int which)
6926 static char buf[100];
6927 bool useskip;
6928 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6929 int length = get_attr_length (insn);
6930 int xdelay;
6932 /* A conditional branch to the following instruction (e.g. the delay slot) is
6933 asking for a disaster. I do not think this can happen as this pattern
6934 is only used when optimizing; jump optimization should eliminate the
6935 jump. But be prepared just in case. */
6937 if (branch_to_delay_slot_p (insn))
6938 return "nop";
6940 /* If this is a long branch with its delay slot unfilled, set `nullify'
6941 as it can nullify the delay slot and save a nop. */
6942 if (length == 8 && dbr_sequence_length () == 0)
6943 nullify = 1;
6945 /* If this is a short forward conditional branch which did not get
6946 its delay slot filled, the delay slot can still be nullified. */
6947 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6948 nullify = forward_branch_p (insn);
6950 /* A forward branch over a single nullified insn can be done with an
6951 extrs instruction. This avoids a single cycle penalty due to
6952 mis-predicted branch if we fall through (branch not taken). */
6953 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6955 switch (length)
6958 /* All short conditional branches except backwards with an unfilled
6959 delay slot. */
6960 case 4:
6961 if (useskip)
6962 strcpy (buf, "{vextrs,|extrw,s,}");
6963 else
6964 strcpy (buf, "{bvb,|bb,}");
6965 if (useskip && GET_MODE (operands[0]) == DImode)
6966 strcpy (buf, "extrd,s,*");
6967 else if (GET_MODE (operands[0]) == DImode)
6968 strcpy (buf, "bb,*");
6969 if ((which == 0 && negated)
6970 || (which == 1 && ! negated))
6971 strcat (buf, ">=");
6972 else
6973 strcat (buf, "<");
6974 if (useskip)
6975 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6976 else if (nullify && negated)
6978 if (branch_needs_nop_p (insn))
6979 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6980 else
6981 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6983 else if (nullify && ! negated)
6985 if (branch_needs_nop_p (insn))
6986 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
6987 else
6988 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6990 else if (! nullify && negated)
6991 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
6992 else if (! nullify && ! negated)
6993 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6994 break;
6996 /* All long conditionals. Note a short backward branch with an
6997 unfilled delay slot is treated just like a long backward branch
6998 with an unfilled delay slot. */
6999 case 8:
7000 /* Handle weird backwards branch with a filled delay slot
7001 which is nullified. */
7002 if (dbr_sequence_length () != 0
7003 && ! forward_branch_p (insn)
7004 && nullify)
7006 strcpy (buf, "{bvb,|bb,}");
7007 if (GET_MODE (operands[0]) == DImode)
7008 strcat (buf, "*");
7009 if ((which == 0 && negated)
7010 || (which == 1 && ! negated))
7011 strcat (buf, "<");
7012 else
7013 strcat (buf, ">=");
7014 if (negated)
7015 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7016 else
7017 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7019 /* Handle short backwards branch with an unfilled delay slot.
7020 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7021 taken and untaken branches. */
7022 else if (dbr_sequence_length () == 0
7023 && ! forward_branch_p (insn)
7024 && INSN_ADDRESSES_SET_P ()
7025 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7026 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7028 strcpy (buf, "{bvb,|bb,}");
7029 if (GET_MODE (operands[0]) == DImode)
7030 strcat (buf, "*");
7031 if ((which == 0 && negated)
7032 || (which == 1 && ! negated))
7033 strcat (buf, ">=");
7034 else
7035 strcat (buf, "<");
7036 if (negated)
7037 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7038 else
7039 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7041 else
7043 strcpy (buf, "{vextrs,|extrw,s,}");
7044 if (GET_MODE (operands[0]) == DImode)
7045 strcpy (buf, "extrd,s,*");
7046 if ((which == 0 && negated)
7047 || (which == 1 && ! negated))
7048 strcat (buf, "<");
7049 else
7050 strcat (buf, ">=");
7051 if (nullify && negated)
7052 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7053 else if (nullify && ! negated)
7054 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7055 else if (negated)
7056 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7057 else
7058 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7060 break;
7062 default:
7063 /* The reversed conditional branch must branch over one additional
7064 instruction if the delay slot is filled and needs to be extracted
7065 by pa_output_lbranch. If the delay slot is empty or this is a
7066 nullified forward branch, the instruction after the reversed
7067 conditional branch must be nullified. */
7068 if (dbr_sequence_length () == 0
7069 || (nullify && forward_branch_p (insn)))
7071 nullify = 1;
7072 xdelay = 0;
7073 operands[4] = GEN_INT (length);
7075 else
7077 xdelay = 1;
7078 operands[4] = GEN_INT (length + 4);
7081 if (GET_MODE (operands[0]) == DImode)
7082 strcpy (buf, "bb,*");
7083 else
7084 strcpy (buf, "{bvb,|bb,}");
7085 if ((which == 0 && negated)
7086 || (which == 1 && !negated))
7087 strcat (buf, "<");
7088 else
7089 strcat (buf, ">=");
7090 if (nullify)
7091 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7092 else
7093 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7094 output_asm_insn (buf, operands);
7095 return pa_output_lbranch (negated ? operands[3] : operands[2],
7096 insn, xdelay);
7098 return buf;
7101 /* Return the output template for emitting a dbra type insn.
7103 Note it may perform some output operations on its own before
7104 returning the final output string. */
7105 const char *
7106 pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
7108 int length = get_attr_length (insn);
7110 /* A conditional branch to the following instruction (e.g. the delay slot) is
7111 asking for a disaster. Be prepared! */
7113 if (branch_to_delay_slot_p (insn))
7115 if (which_alternative == 0)
7116 return "ldo %1(%0),%0";
7117 else if (which_alternative == 1)
7119 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7120 output_asm_insn ("ldw -16(%%r30),%4", operands);
7121 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7122 return "{fldws|fldw} -16(%%r30),%0";
7124 else
7126 output_asm_insn ("ldw %0,%4", operands);
7127 return "ldo %1(%4),%4\n\tstw %4,%0";
7131 if (which_alternative == 0)
7133 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7134 int xdelay;
7136 /* If this is a long branch with its delay slot unfilled, set `nullify'
7137 as it can nullify the delay slot and save a nop. */
7138 if (length == 8 && dbr_sequence_length () == 0)
7139 nullify = 1;
7141 /* If this is a short forward conditional branch which did not get
7142 its delay slot filled, the delay slot can still be nullified. */
7143 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7144 nullify = forward_branch_p (insn);
7146 switch (length)
7148 case 4:
7149 if (nullify)
7151 if (branch_needs_nop_p (insn))
7152 return "addib,%C2,n %1,%0,%3%#";
7153 else
7154 return "addib,%C2,n %1,%0,%3";
7156 else
7157 return "addib,%C2 %1,%0,%3";
7159 case 8:
7160 /* Handle weird backwards branch with a filled delay slot
7161 which is nullified. */
7162 if (dbr_sequence_length () != 0
7163 && ! forward_branch_p (insn)
7164 && nullify)
7165 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7166 /* Handle short backwards branch with an unfilled delay slot.
7167 Using a addb;nop rather than addi;bl saves 1 cycle for both
7168 taken and untaken branches. */
7169 else if (dbr_sequence_length () == 0
7170 && ! forward_branch_p (insn)
7171 && INSN_ADDRESSES_SET_P ()
7172 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7173 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7174 return "addib,%C2 %1,%0,%3%#";
7176 /* Handle normal cases. */
7177 if (nullify)
7178 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7179 else
7180 return "addi,%N2 %1,%0,%0\n\tb %3";
7182 default:
7183 /* The reversed conditional branch must branch over one additional
7184 instruction if the delay slot is filled and needs to be extracted
7185 by pa_output_lbranch. If the delay slot is empty or this is a
7186 nullified forward branch, the instruction after the reversed
7187 conditional branch must be nullified. */
7188 if (dbr_sequence_length () == 0
7189 || (nullify && forward_branch_p (insn)))
7191 nullify = 1;
7192 xdelay = 0;
7193 operands[4] = GEN_INT (length);
7195 else
7197 xdelay = 1;
7198 operands[4] = GEN_INT (length + 4);
7201 if (nullify)
7202 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7203 else
7204 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7206 return pa_output_lbranch (operands[3], insn, xdelay);
7210 /* Deal with gross reload from FP register case. */
7211 else if (which_alternative == 1)
7213 /* Move loop counter from FP register to MEM then into a GR,
7214 increment the GR, store the GR into MEM, and finally reload
7215 the FP register from MEM from within the branch's delay slot. */
7216 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7217 operands);
7218 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7219 if (length == 24)
7220 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7221 else if (length == 28)
7222 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7223 else
7225 operands[5] = GEN_INT (length - 16);
7226 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7227 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7228 return pa_output_lbranch (operands[3], insn, 0);
7231 /* Deal with gross reload from memory case. */
7232 else
7234 /* Reload loop counter from memory, the store back to memory
7235 happens in the branch's delay slot. */
7236 output_asm_insn ("ldw %0,%4", operands);
7237 if (length == 12)
7238 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7239 else if (length == 16)
7240 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7241 else
7243 operands[5] = GEN_INT (length - 4);
7244 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7245 return pa_output_lbranch (operands[3], insn, 0);
7250 /* Return the output template for emitting a movb type insn.
7252 Note it may perform some output operations on its own before
7253 returning the final output string. */
7254 const char *
7255 pa_output_movb (rtx *operands, rtx insn, int which_alternative,
7256 int reverse_comparison)
7258 int length = get_attr_length (insn);
7260 /* A conditional branch to the following instruction (e.g. the delay slot) is
7261 asking for a disaster. Be prepared! */
7263 if (branch_to_delay_slot_p (insn))
7265 if (which_alternative == 0)
7266 return "copy %1,%0";
7267 else if (which_alternative == 1)
7269 output_asm_insn ("stw %1,-16(%%r30)", operands);
7270 return "{fldws|fldw} -16(%%r30),%0";
7272 else if (which_alternative == 2)
7273 return "stw %1,%0";
7274 else
7275 return "mtsar %r1";
7278 /* Support the second variant. */
7279 if (reverse_comparison)
7280 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7282 if (which_alternative == 0)
7284 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7285 int xdelay;
7287 /* If this is a long branch with its delay slot unfilled, set `nullify'
7288 as it can nullify the delay slot and save a nop. */
7289 if (length == 8 && dbr_sequence_length () == 0)
7290 nullify = 1;
7292 /* If this is a short forward conditional branch which did not get
7293 its delay slot filled, the delay slot can still be nullified. */
7294 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7295 nullify = forward_branch_p (insn);
7297 switch (length)
7299 case 4:
7300 if (nullify)
7302 if (branch_needs_nop_p (insn))
7303 return "movb,%C2,n %1,%0,%3%#";
7304 else
7305 return "movb,%C2,n %1,%0,%3";
7307 else
7308 return "movb,%C2 %1,%0,%3";
7310 case 8:
7311 /* Handle weird backwards branch with a filled delay slot
7312 which is nullified. */
7313 if (dbr_sequence_length () != 0
7314 && ! forward_branch_p (insn)
7315 && nullify)
7316 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7318 /* Handle short backwards branch with an unfilled delay slot.
7319 Using a movb;nop rather than or;bl saves 1 cycle for both
7320 taken and untaken branches. */
7321 else if (dbr_sequence_length () == 0
7322 && ! forward_branch_p (insn)
7323 && INSN_ADDRESSES_SET_P ()
7324 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7325 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7326 return "movb,%C2 %1,%0,%3%#";
7327 /* Handle normal cases. */
7328 if (nullify)
7329 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7330 else
7331 return "or,%N2 %1,%%r0,%0\n\tb %3";
7333 default:
7334 /* The reversed conditional branch must branch over one additional
7335 instruction if the delay slot is filled and needs to be extracted
7336 by pa_output_lbranch. If the delay slot is empty or this is a
7337 nullified forward branch, the instruction after the reversed
7338 conditional branch must be nullified. */
7339 if (dbr_sequence_length () == 0
7340 || (nullify && forward_branch_p (insn)))
7342 nullify = 1;
7343 xdelay = 0;
7344 operands[4] = GEN_INT (length);
7346 else
7348 xdelay = 1;
7349 operands[4] = GEN_INT (length + 4);
7352 if (nullify)
7353 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7354 else
7355 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7357 return pa_output_lbranch (operands[3], insn, xdelay);
7360 /* Deal with gross reload for FP destination register case. */
7361 else if (which_alternative == 1)
7363 /* Move source register to MEM, perform the branch test, then
7364 finally load the FP register from MEM from within the branch's
7365 delay slot. */
7366 output_asm_insn ("stw %1,-16(%%r30)", operands);
7367 if (length == 12)
7368 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7369 else if (length == 16)
7370 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7371 else
7373 operands[4] = GEN_INT (length - 4);
7374 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7375 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7376 return pa_output_lbranch (operands[3], insn, 0);
7379 /* Deal with gross reload from memory case. */
7380 else if (which_alternative == 2)
7382 /* Reload loop counter from memory, the store back to memory
7383 happens in the branch's delay slot. */
7384 if (length == 8)
7385 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7386 else if (length == 12)
7387 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7388 else
7390 operands[4] = GEN_INT (length);
7391 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7392 operands);
7393 return pa_output_lbranch (operands[3], insn, 0);
7396 /* Handle SAR as a destination. */
7397 else
7399 if (length == 8)
7400 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7401 else if (length == 12)
7402 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7403 else
7405 operands[4] = GEN_INT (length);
7406 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7407 operands);
7408 return pa_output_lbranch (operands[3], insn, 0);
7413 /* Copy any FP arguments in INSN into integer registers. */
7414 static void
7415 copy_fp_args (rtx insn)
7417 rtx link;
7418 rtx xoperands[2];
7420 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7422 int arg_mode, regno;
7423 rtx use = XEXP (link, 0);
7425 if (! (GET_CODE (use) == USE
7426 && GET_CODE (XEXP (use, 0)) == REG
7427 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7428 continue;
7430 arg_mode = GET_MODE (XEXP (use, 0));
7431 regno = REGNO (XEXP (use, 0));
7433 /* Is it a floating point register? */
7434 if (regno >= 32 && regno <= 39)
7436 /* Copy the FP register into an integer register via memory. */
7437 if (arg_mode == SFmode)
7439 xoperands[0] = XEXP (use, 0);
7440 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7441 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7442 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7444 else
7446 xoperands[0] = XEXP (use, 0);
7447 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7448 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7449 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7450 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7456 /* Compute length of the FP argument copy sequence for INSN. */
7457 static int
7458 length_fp_args (rtx insn)
7460 int length = 0;
7461 rtx link;
7463 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7465 int arg_mode, regno;
7466 rtx use = XEXP (link, 0);
7468 if (! (GET_CODE (use) == USE
7469 && GET_CODE (XEXP (use, 0)) == REG
7470 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7471 continue;
7473 arg_mode = GET_MODE (XEXP (use, 0));
7474 regno = REGNO (XEXP (use, 0));
7476 /* Is it a floating point register? */
7477 if (regno >= 32 && regno <= 39)
7479 if (arg_mode == SFmode)
7480 length += 8;
7481 else
7482 length += 12;
7486 return length;
7489 /* Return the attribute length for the millicode call instruction INSN.
7490 The length must match the code generated by pa_output_millicode_call.
7491 We include the delay slot in the returned length as it is better to
7492 overestimate the length than to underestimate it. */
7494 int
7495 pa_attr_length_millicode_call (rtx insn)
7497 unsigned long distance = -1;
7498 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7500 if (INSN_ADDRESSES_SET_P ())
7502 distance = (total + insn_current_reference_address (insn));
7503 if (distance < total)
7504 distance = -1;
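/* Note (added): the addition above is done in unsigned arithmetic; if
   it wraps around (distance < total), the reference address is
   effectively unknown or out of range, so distance is pinned at the
   maximum value and the long call sequence below is selected.  */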
7507 if (TARGET_64BIT)
7509 if (!TARGET_LONG_CALLS && distance < 7600000)
7510 return 8;
7512 return 20;
7514 else if (TARGET_PORTABLE_RUNTIME)
7515 return 24;
7516 else
7518 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7519 return 8;
7521 if (!flag_pic)
7522 return 12;
7524 return 24;
7528 /* INSN is a function call. It may have an unconditional jump
7529 in its delay slot.
7531 CALL_DEST is the routine we are calling. */
7533 const char *
7534 pa_output_millicode_call (rtx insn, rtx call_dest)
7536 int attr_length = get_attr_length (insn);
7537 int seq_length = dbr_sequence_length ();
7538 int distance;
7539 rtx seq_insn;
7540 rtx xoperands[3];
7542 xoperands[0] = call_dest;
7543 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7545 /* Handle the common case where we are sure that the branch will
7546 reach the beginning of the $CODE$ subspace. The within reach
7547 form of the $$sh_func_adrs call has a length of 28. Because it
7548 has an attribute type of sh_func_adrs, it never has a nonzero
7549 sequence length (i.e., the delay slot is never filled). */
7550 if (!TARGET_LONG_CALLS
7551 && (attr_length == 8
7552 || (attr_length == 28
7553 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7555 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7557 else
7559 if (TARGET_64BIT)
7561 /* It might seem that one insn could be saved by accessing
7562 the millicode function using the linkage table. However,
7563 this doesn't work in shared libraries and other dynamically
7564 loaded objects. Using a pc-relative sequence also avoids
7565 problems related to the implicit use of the gp register. */
7566 output_asm_insn ("b,l .+8,%%r1", xoperands);
7568 if (TARGET_GAS)
7570 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7571 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7573 else
7575 xoperands[1] = gen_label_rtx ();
7576 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7577 targetm.asm_out.internal_label (asm_out_file, "L",
7578 CODE_LABEL_NUMBER (xoperands[1]));
7579 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7582 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7584 else if (TARGET_PORTABLE_RUNTIME)
7586 /* Pure portable runtime doesn't allow be/ble; we also don't
7587 have PIC support in the assembler/linker, so this sequence
7588 is needed. */
7590 /* Get the address of our target into %r1. */
7591 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7592 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7594 /* Get our return address into %r31. */
7595 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7596 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7598 /* Jump to our target address in %r1. */
7599 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7601 else if (!flag_pic)
7603 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7604 if (TARGET_PA_20)
7605 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7606 else
7607 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7609 else
7611 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7612 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7614 if (TARGET_SOM || !TARGET_GAS)
7616 /* The HP assembler can generate relocations for the
7617 difference of two symbols. GAS can do this for a
7618 millicode symbol but not an arbitrary external
7619 symbol when generating SOM output. */
7620 xoperands[1] = gen_label_rtx ();
7621 targetm.asm_out.internal_label (asm_out_file, "L",
7622 CODE_LABEL_NUMBER (xoperands[1]));
7623 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7624 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7626 else
7628 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7629 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7630 xoperands);
7633 /* Jump to our target address in %r1. */
7634 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7638 if (seq_length == 0)
7639 output_asm_insn ("nop", xoperands);
7641 /* We are done if there isn't a jump in the delay slot. */
7642 if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
7643 return "";
7645 /* This call has an unconditional jump in its delay slot. */
7646 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7648 /* See if the return address can be adjusted. Use the containing
7649 sequence insn's address. */
7650 if (INSN_ADDRESSES_SET_P ())
7652 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7653 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7654 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7656 if (VAL_14_BITS_P (distance))
7658 xoperands[1] = gen_label_rtx ();
7659 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7660 targetm.asm_out.internal_label (asm_out_file, "L",
7661 CODE_LABEL_NUMBER (xoperands[1]));
7663 else
7664 /* ??? This branch may not reach its target. */
7665 output_asm_insn ("nop\n\tb,n %0", xoperands);
7667 else
7668 /* ??? This branch may not reach its target. */
7669 output_asm_insn ("nop\n\tb,n %0", xoperands);
7671 /* Delete the jump. */
7672 SET_INSN_DELETED (NEXT_INSN (insn));
7674 return "";
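/* For illustration (editor's note; the millicode symbol is made up):
   on TARGET_64BIT with GAS, the sequence emitted above is

       b,l .+8,%r1
       addil L'$$mulI-$PIC_pcrel$0+4,%r1
       ldo R'$$mulI-$PIC_pcrel$0+8(%r1),%r1
       bve,l (%r1),%r2

   which forms the pc-relative address of the millicode entry in %r1
   and branches there with the return link in %r2.  */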
7677 /* Return the attribute length of the call instruction INSN. The SIBCALL
7678 flag indicates whether INSN is a regular call or a sibling call. The
7679 length returned must be longer than the code actually generated by
7680 pa_output_call. Since branch shortening is done before delay branch
7681 sequencing, there is no way to determine whether or not the delay
7682 slot will be filled during branch shortening. Even when the delay
7683 slot is filled, we may have to add a nop if the delay slot contains
7684 a branch that can't reach its target. Thus, we always have to include
7685 the delay slot in the length estimate. This used to be done in
7686 pa_adjust_insn_length but we do it here now as some sequences always
7687 fill the delay slot and we can save four bytes in the estimate for
7688 these sequences. */
7690 int
7691 pa_attr_length_call (rtx insn, int sibcall)
7693 int local_call;
7694 rtx call, call_dest;
7695 tree call_decl;
7696 int length = 0;
7697 rtx pat = PATTERN (insn);
7698 unsigned long distance = -1;
7700 gcc_assert (CALL_P (insn));
7702 if (INSN_ADDRESSES_SET_P ())
7704 unsigned long total;
7706 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7707 distance = (total + insn_current_reference_address (insn));
7708 if (distance < total)
7709 distance = -1;
7712 gcc_assert (GET_CODE (pat) == PARALLEL);
7714 /* Get the call rtx. */
7715 call = XVECEXP (pat, 0, 0);
7716 if (GET_CODE (call) == SET)
7717 call = SET_SRC (call);
7719 gcc_assert (GET_CODE (call) == CALL);
7721 /* Determine if this is a local call. */
7722 call_dest = XEXP (XEXP (call, 0), 0);
7723 call_decl = SYMBOL_REF_DECL (call_dest);
7724 local_call = call_decl && targetm.binds_local_p (call_decl);
7726 /* pc-relative branch. */
7727 if (!TARGET_LONG_CALLS
7728 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7729 || distance < MAX_PCREL17F_OFFSET))
7730 length += 8;
7732 /* 64-bit plabel sequence. */
7733 else if (TARGET_64BIT && !local_call)
7734 length += sibcall ? 28 : 24;
7736 /* non-pic long absolute branch sequence. */
7737 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7738 length += 12;
7740 /* long pc-relative branch sequence. */
7741 else if (TARGET_LONG_PIC_SDIFF_CALL
7742 || (TARGET_GAS && !TARGET_SOM
7743 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7745 length += 20;
7747 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7748 length += 8;
7751 /* 32-bit plabel sequence. */
7752 else
7754 length += 32;
7756 if (TARGET_SOM)
7757 length += length_fp_args (insn);
7759 if (flag_pic)
7760 length += 4;
7762 if (!TARGET_PA_20)
7764 if (!sibcall)
7765 length += 8;
7767 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7768 length += 8;
7772 return length;
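/* Editor's sketch, not part of the original file: the base lengths
   above keyed by the selected sequence.  The enum and function names
   are hypothetical; the extra terms for FP argument copies, pic and
   pre-2.0 space register handling are noted in comments.  */

enum sketch_call_seq
{
  SKETCH_PCREL,		/* short pc-relative branch */
  SKETCH_PLABEL64,	/* 64-bit plabel sequence */
  SKETCH_ABS,		/* non-pic long absolute branch */
  SKETCH_LONG_PCREL,	/* long pc-relative branch */
  SKETCH_PLABEL32	/* 32-bit plabel sequence */
};

static int
sketch_base_call_length (enum sketch_call_seq seq, int sibcall)
{
  switch (seq)
    {
    case SKETCH_PCREL:	    return 8;
    case SKETCH_PLABEL64:   return sibcall ? 28 : 24;
    case SKETCH_ABS:	    return 12;
    case SKETCH_LONG_PCREL: return 20;	/* +8 when space regs are live */
    default:		    return 32;	/* plus FP copies, pic and pre-2.0 extras */
    }
}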
7775 /* INSN is a function call. It may have an unconditional jump
7776 in its delay slot.
7778 CALL_DEST is the routine we are calling. */
7780 const char *
7781 pa_output_call (rtx insn, rtx call_dest, int sibcall)
7783 int delay_insn_deleted = 0;
7784 int delay_slot_filled = 0;
7785 int seq_length = dbr_sequence_length ();
7786 tree call_decl = SYMBOL_REF_DECL (call_dest);
7787 int local_call = call_decl && targetm.binds_local_p (call_decl);
7788 rtx xoperands[2];
7790 xoperands[0] = call_dest;
7792 /* Handle the common case where we're sure that the branch will reach
7793 the beginning of the "$CODE$" subspace. This is the beginning of
7794 the current function if we are in a named section. */
7795 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7797 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7798 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7800 else
7802 if (TARGET_64BIT && !local_call)
7804 /* ??? As far as I can tell, the HP linker doesn't support the
7805 long pc-relative sequence described in the 64-bit runtime
7806 architecture. So, we use a slightly longer indirect call. */
7807 xoperands[0] = pa_get_deferred_plabel (call_dest);
7808 xoperands[1] = gen_label_rtx ();
7810 /* If this isn't a sibcall, we put the load of %r27 into the
7811 delay slot. We can't do this in a sibcall as we don't
7812 have a second call-clobbered scratch register available. */
7813 if (seq_length != 0
7814 && ! JUMP_P (NEXT_INSN (insn))
7815 && !sibcall)
7817 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7818 optimize, 0, NULL);
7820 /* Now delete the delay insn. */
7821 SET_INSN_DELETED (NEXT_INSN (insn));
7822 delay_insn_deleted = 1;
7825 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7826 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7827 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7829 if (sibcall)
7831 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7832 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7833 output_asm_insn ("bve (%%r1)", xoperands);
7835 else
7837 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7838 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7839 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7840 delay_slot_filled = 1;
7843 else
7845 int indirect_call = 0;
7847 /* Emit a long call. There are several different sequences
7848 of increasing length and complexity. In most cases,
7849 they don't allow an instruction in the delay slot. */
7850 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7851 && !TARGET_LONG_PIC_SDIFF_CALL
7852 && !(TARGET_GAS && !TARGET_SOM
7853 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7854 && !TARGET_64BIT)
7855 indirect_call = 1;
7857 if (seq_length != 0
7858 && ! JUMP_P (NEXT_INSN (insn))
7859 && !sibcall
7860 && (!TARGET_PA_20
7861 || indirect_call
7862 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7864 /* A non-jump insn in the delay slot. By definition we can
7865 emit this insn before the call (and in fact before argument
7866 relocation). */
7867 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7868 NULL);
7870 /* Now delete the delay insn. */
7871 SET_INSN_DELETED (NEXT_INSN (insn));
7872 delay_insn_deleted = 1;
7875 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7877 /* This is the best sequence for making long calls in
7878 non-pic code. Unfortunately, GNU ld doesn't provide
7879 the stub needed for external calls, and GAS's support
7880 for this with the SOM linker is buggy. It is safe
7881 to use this for local calls. */
7882 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7883 if (sibcall)
7884 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7885 else
7887 if (TARGET_PA_20)
7888 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7889 xoperands);
7890 else
7891 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7893 output_asm_insn ("copy %%r31,%%r2", xoperands);
7894 delay_slot_filled = 1;
7897 else
7899 if (TARGET_LONG_PIC_SDIFF_CALL)
7901 /* The HP assembler and linker can handle relocations
7902 for the difference of two symbols. The HP assembler
7903 recognizes the sequence as a pc-relative call and
7904 the linker provides stubs when needed. */
7905 xoperands[1] = gen_label_rtx ();
7906 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7907 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7908 targetm.asm_out.internal_label (asm_out_file, "L",
7909 CODE_LABEL_NUMBER (xoperands[1]));
7910 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7912 else if (TARGET_GAS && !TARGET_SOM
7913 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7915 /* GAS currently can't generate the relocations that
7916 are needed for the SOM linker under HP-UX using this
7917 sequence. The GNU linker doesn't generate the stubs
7918 that are needed for external calls on TARGET_ELF32
7919 with this sequence. For now, we have to use a
7920 longer plabel sequence when using GAS. */
7921 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7922 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7923 xoperands);
7924 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7925 xoperands);
7927 else
7929 /* Emit a long plabel-based call sequence. This is
7930 essentially an inline implementation of $$dyncall.
7931 We don't actually try to call $$dyncall as this is
7932 as difficult as calling the function itself. */
7933 xoperands[0] = pa_get_deferred_plabel (call_dest);
7934 xoperands[1] = gen_label_rtx ();
7936 /* Since the call is indirect, FP arguments in registers
7937 need to be copied to the general registers. Then, the
7938 argument relocation stub will copy them back. */
7939 if (TARGET_SOM)
7940 copy_fp_args (insn);
7942 if (flag_pic)
7944 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7945 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7946 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7948 else
7950 output_asm_insn ("addil LR'%0-$global$,%%r27",
7951 xoperands);
7952 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7953 xoperands);
7956 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7957 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7958 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7959 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7961 if (!sibcall && !TARGET_PA_20)
7963 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7964 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7965 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7966 else
7967 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7971 if (TARGET_PA_20)
7973 if (sibcall)
7974 output_asm_insn ("bve (%%r1)", xoperands);
7975 else
7977 if (indirect_call)
7979 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7980 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7981 delay_slot_filled = 1;
7983 else
7984 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7987 else
7989 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7990 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7991 xoperands);
7993 if (sibcall)
7995 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7996 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7997 else
7998 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8000 else
8002 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8003 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8004 else
8005 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8007 if (indirect_call)
8008 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8009 else
8010 output_asm_insn ("copy %%r31,%%r2", xoperands);
8011 delay_slot_filled = 1;
8018 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8019 output_asm_insn ("nop", xoperands);
8021 /* We are done if there isn't a jump in the delay slot. */
8022 if (seq_length == 0
8023 || delay_insn_deleted
8024 || ! JUMP_P (NEXT_INSN (insn)))
8025 return "";
8027 /* A sibcall should never have a branch in the delay slot. */
8028 gcc_assert (!sibcall);
8030 /* This call has an unconditional jump in its delay slot. */
8031 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8033 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8035 /* See if the return address can be adjusted. Use the containing
8036 sequence insn's address. This would break the regular call/return
8037 relationship assumed by the table-based eh unwinder, so only do that
8038 if the call is not possibly throwing. */
8039 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
8040 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8041 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8043 if (VAL_14_BITS_P (distance)
8044 && !(can_throw_internal (insn) || can_throw_external (insn)))
8046 xoperands[1] = gen_label_rtx ();
8047 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8048 targetm.asm_out.internal_label (asm_out_file, "L",
8049 CODE_LABEL_NUMBER (xoperands[1]));
8051 else
8052 output_asm_insn ("nop\n\tb,n %0", xoperands);
8054 else
8055 output_asm_insn ("b,n %0", xoperands);
8057 /* Delete the jump. */
8058 SET_INSN_DELETED (NEXT_INSN (insn));
8060 return "";
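/* For illustration (editor's note): in the non-sibcall case the
   64-bit plabel sequence above emits roughly

       addil LT'<plabel>,%r27
       ldd RT'<plabel>(%r1),%r1
       ldd 0(%r1),%r1       ; descriptor address from the linkage table
       ldd 16(%r1),%r2      ; entry point from the descriptor
       bve,l (%r2),%r2
       ldd 24(%r1),%r27     ; new gp loaded in the delay slot

   where <plabel> stands for the symbol created by
   pa_get_deferred_plabel.  */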
8063 /* Return the attribute length of the indirect call instruction INSN.
8064 The length must match the code generated by pa_output_indirect_call.
8065 The returned length includes the delay slot. Currently, the delay
8066 slot of an indirect call sequence is not exposed and it is used by
8067 the sequence itself. */
8069 int
8070 pa_attr_length_indirect_call (rtx insn)
8072 unsigned long distance = -1;
8073 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8075 if (INSN_ADDRESSES_SET_P ())
8077 distance = (total + insn_current_reference_address (insn));
8078 if (distance < total)
8079 distance = -1;
8082 if (TARGET_64BIT)
8083 return 12;
8085 if (TARGET_FAST_INDIRECT_CALLS
8086 || (!TARGET_LONG_CALLS
8087 && !TARGET_PORTABLE_RUNTIME
8088 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8089 || distance < MAX_PCREL17F_OFFSET)))
8090 return 8;
8092 if (flag_pic)
8093 return 20;
8095 if (TARGET_PORTABLE_RUNTIME)
8096 return 16;
8098 /* Out of reach, can use ble. */
8099 return 12;
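/* Editor's sketch, not part of the original file: the cases above as
   a standalone decision function (parameter names hypothetical);
   lengths are in bytes and include the delay slot.  */

static int
sketch_indirect_call_length (int is_64bit, int fast_indirect,
                             int within_reach, int pic,
                             int portable_runtime)
{
  if (is_64bit)
    return 12;
  if (fast_indirect || within_reach)
    return 8;		/* $$dyncall reachable pc-relative */
  if (pic)
    return 20;		/* long pic sequence */
  if (portable_runtime)
    return 16;
  return 12;		/* out of reach, uses ble */
}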
8102 const char *
8103 pa_output_indirect_call (rtx insn, rtx call_dest)
8105 rtx xoperands[1];
8107 if (TARGET_64BIT)
8109 xoperands[0] = call_dest;
8110 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8111 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8112 return "";
8115 /* First the special case for kernels, level 0 systems, etc. */
8116 if (TARGET_FAST_INDIRECT_CALLS)
8117 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8119 /* Now the normal case -- we can reach $$dyncall directly or
8120 we're sure that we can get there via a long-branch stub.
8122 No need to check target flags as the length uniquely identifies
8123 the remaining cases. */
8124 if (pa_attr_length_indirect_call (insn) == 8)
8126 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8127 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8128 variant of the B,L instruction can't be used on the SOM target. */
8129 if (TARGET_PA_20 && !TARGET_SOM)
8130 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8131 else
8132 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8135 /* Long millicode call, but we are not generating PIC or portable runtime
8136 code. */
8137 if (pa_attr_length_indirect_call (insn) == 12)
8138 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8140 /* Long millicode call for portable runtime. */
8141 if (pa_attr_length_indirect_call (insn) == 16)
8142 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8144 /* We need a long PIC call to $$dyncall. */
8145 xoperands[0] = NULL_RTX;
8146 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8147 if (TARGET_SOM || !TARGET_GAS)
8149 xoperands[0] = gen_label_rtx ();
8150 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8151 targetm.asm_out.internal_label (asm_out_file, "L",
8152 CODE_LABEL_NUMBER (xoperands[0]));
8153 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8155 else
8157 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8158 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8159 xoperands);
8161 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8162 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8163 return "";
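/* For illustration (editor's note): with GAS and non-SOM output, the
   long pic path above emits

       b,l .+8,%r2
       addil L'$$dyncall-$PIC_pcrel$0+4,%r2
       ldo R'$$dyncall-$PIC_pcrel$0+8(%r1),%r1
       bv %r0(%r1)
       ldo 12(%r2),%r2     ; executed in the delay slot

   The final ldo advances %r2 from the return point of the b,l to the
   insn after the delay slot, so $$dyncall returns past this
   sequence.  */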
8166 /* In HP-UX 8.0's shared library scheme, special relocations are needed
8167 for function labels if they might be passed to a function
8168 in a shared library (because shared libraries don't live in code
8169 space), and special magic is needed to construct their address. */
8171 void
8172 pa_encode_label (rtx sym)
8174 const char *str = XSTR (sym, 0);
8175 int len = strlen (str) + 1;
8176 char *newstr, *p;
8178 p = newstr = XALLOCAVEC (char, len + 1);
8179 *p++ = '@';
8180 strcpy (p, str);
8182 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8185 static void
8186 pa_encode_section_info (tree decl, rtx rtl, int first)
8188 int old_referenced = 0;
8190 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8191 old_referenced
8192 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8194 default_encode_section_info (decl, rtl, first);
8196 if (first && TEXT_SPACE_P (decl))
8198 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8199 if (TREE_CODE (decl) == FUNCTION_DECL)
8200 pa_encode_label (XEXP (rtl, 0));
8202 else if (old_referenced)
8203 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8206 /* This is sort of inverse to pa_encode_section_info. */
8208 static const char *
8209 pa_strip_name_encoding (const char *str)
8211 str += (*str == '@');
8212 str += (*str == '*');
8213 return str;
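/* Editor's standalone model of the round trip above (illustration
   only; the sketch_* names are made up).  pa_encode_label prefixes
   '@', and pa_strip_name_encoding drops a leading '@' and '*'.  */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
sketch_encode (const char *name)
{
  char *s = (char *) malloc (strlen (name) + 2);	/* '@' + name + NUL */
  s[0] = '@';
  strcpy (s + 1, name);
  return s;
}

static const char *
sketch_strip (const char *s)
{
  s += (*s == '@');
  s += (*s == '*');
  return s;
}

int
main (void)
{
  char *enc = sketch_encode ("foo");
  printf ("%s -> %s\n", enc, sketch_strip (enc));	/* prints "@foo -> foo" */
  free (enc);
  return 0;
}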
8216 /* Returns 1 if OP is a function label involved in a simple addition
8217 with a constant. Used to keep certain patterns from matching
8218 during instruction combination. */
8219 int
8220 pa_is_function_label_plus_const (rtx op)
8222 /* Strip off any CONST. */
8223 if (GET_CODE (op) == CONST)
8224 op = XEXP (op, 0);
8226 return (GET_CODE (op) == PLUS
8227 && function_label_operand (XEXP (op, 0), VOIDmode)
8228 && GET_CODE (XEXP (op, 1)) == CONST_INT);
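/* For illustration (editor's note): an operand accepted above has the
   shape

       (const (plus (symbol_ref "@foo") (const_int 4)))

   or the same PLUS without the CONST wrapper; the '@' prefix marks
   foo as a function label (see pa_encode_label).  */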
8231 /* Output assembly code for a thunk to FUNCTION. */
8233 static void
8234 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8235 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8236 tree function)
8238 static unsigned int current_thunk_number;
8239 int val_14 = VAL_14_BITS_P (delta);
8240 unsigned int old_last_address = last_address, nbytes = 0;
8241 char label[16];
8242 rtx xoperands[4];
8244 xoperands[0] = XEXP (DECL_RTL (function), 0);
8245 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8246 xoperands[2] = GEN_INT (delta);
8248 final_start_function (emit_barrier (), file, 1);
8250 /* Output the thunk. We know that the function is in the same
8251 translation unit (i.e., the same space) as the thunk, and that
8252 thunks are output after their method. Thus, we don't need an
8253 external branch to reach the function. With SOM and GAS,
8254 functions and thunks are effectively in different sections.
8255 Thus, we can always use an IA-relative branch and the linker
8256 will add a long branch stub if necessary.
8258 However, we have to be careful when generating PIC code on the
8259 SOM port to ensure that the sequence does not transfer to an
8260 import stub for the target function as this could clobber the
8261 return value saved at SP-24. This would also apply to the
8262 32-bit linux port if the multi-space model is implemented. */
8263 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8264 && !(flag_pic && TREE_PUBLIC (function))
8265 && (TARGET_GAS || last_address < 262132))
8266 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8267 && ((targetm_common.have_named_sections
8268 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8269 /* The GNU 64-bit linker has rather poor stub management.
8270 So, we use a long branch from thunks that aren't in
8271 the same section as the target function. */
8272 && ((!TARGET_64BIT
8273 && (DECL_SECTION_NAME (thunk_fndecl)
8274 != DECL_SECTION_NAME (function)))
8275 || ((DECL_SECTION_NAME (thunk_fndecl)
8276 == DECL_SECTION_NAME (function))
8277 && last_address < 262132)))
8278 || (targetm_common.have_named_sections
8279 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8280 && DECL_SECTION_NAME (function) == NULL
8281 && last_address < 262132)
8282 || (!targetm_common.have_named_sections
8283 && last_address < 262132))))
8285 if (!val_14)
8286 output_asm_insn ("addil L'%2,%%r26", xoperands);
8288 output_asm_insn ("b %0", xoperands);
8290 if (val_14)
8292 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8293 nbytes += 8;
8295 else
8297 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8298 nbytes += 12;
8301 else if (TARGET_64BIT)
8303 /* We only have one call-clobbered scratch register, so we can't
8304 make use of the delay slot if delta doesn't fit in 14 bits. */
8305 if (!val_14)
8307 output_asm_insn ("addil L'%2,%%r26", xoperands);
8308 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8311 output_asm_insn ("b,l .+8,%%r1", xoperands);
8313 if (TARGET_GAS)
8315 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8316 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8318 else
8320 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8321 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8322 output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
8324 if (val_14)
8326 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8327 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8328 nbytes += 20;
8330 else
8332 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8333 nbytes += 24;
8336 else if (TARGET_PORTABLE_RUNTIME)
8338 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8339 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8341 if (!val_14)
8342 output_asm_insn ("addil L'%2,%%r26", xoperands);
8344 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8346 if (val_14)
8348 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8349 nbytes += 16;
8351 else
8353 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8354 nbytes += 20;
8357 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8359 /* The function is accessible from outside this module. The only
8360 way to avoid an import stub between the thunk and function is to
8361 call the function directly with an indirect sequence similar to
8362 that used by $$dyncall. This is possible because $$dyncall acts
8363 as the import stub in an indirect call. */
8364 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8365 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8366 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8367 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8368 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8369 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8370 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8371 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8372 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8374 if (!val_14)
8376 output_asm_insn ("addil L'%2,%%r26", xoperands);
8377 nbytes += 4;
8380 if (TARGET_PA_20)
8382 output_asm_insn ("bve (%%r22)", xoperands);
8383 nbytes += 36;
8385 else if (TARGET_NO_SPACE_REGS)
8387 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8388 nbytes += 36;
8390 else
8392 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8393 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8394 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8395 nbytes += 44;
8398 if (val_14)
8399 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8400 else
8401 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8403 else if (flag_pic)
8405 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8407 if (TARGET_SOM || !TARGET_GAS)
8409 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8410 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8412 else
8414 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8415 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8418 if (!val_14)
8419 output_asm_insn ("addil L'%2,%%r26", xoperands);
8421 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8423 if (val_14)
8425 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8426 nbytes += 20;
8428 else
8430 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8431 nbytes += 24;
8434 else
8436 if (!val_14)
8437 output_asm_insn ("addil L'%2,%%r26", xoperands);
8439 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8440 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8442 if (val_14)
8444 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8445 nbytes += 12;
8447 else
8449 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8450 nbytes += 16;
8454 final_end_function ();
8456 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8458 switch_to_section (data_section);
8459 output_asm_insn (".align 4", xoperands);
8460 ASM_OUTPUT_LABEL (file, label);
8461 output_asm_insn (".word P'%0", xoperands);
8464 current_thunk_number++;
8465 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8466 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8467 last_address += nbytes;
8468 if (old_last_address > last_address)
8469 last_address = UINT_MAX;
8470 update_total_code_bytes (nbytes);
8473 /* Only direct calls to static functions are allowed to be sibling (tail)
8474 call optimized.
8476 This restriction is necessary because some linker-generated stubs will
8477 store return pointers into rp' in some cases, which might clobber a
8478 live value already in rp'.
8480 In a sibcall the current function and the target function share stack
8481 space. Thus if the path to the current function and the path to the
8482 target function save a value in rp', they save the value into the
8483 same stack slot, which has undesirable consequences.
8485 Because of the deferred binding nature of shared libraries any function
8486 with external scope could be in a different load module and thus require
8487 rp' to be saved when calling that function. So sibcall optimizations
8488 can only be safe for static functions.
8490 Note that GCC never needs return value relocations, so we don't have to
8491 worry about static calls with return value relocations (which require
8492 saving rp').
8494 It is safe to perform a sibcall optimization when the target function
8495 will never return. */
8496 static bool
8497 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8499 if (TARGET_PORTABLE_RUNTIME)
8500 return false;
8502 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8503 single subspace mode and the call is not indirect. As far as I know,
8504 there is no operating system support for the multiple subspace mode.
8505 It might be possible to support indirect calls if we didn't use
8506 $$dyncall (see the indirect sequence generated in pa_output_call). */
8507 if (TARGET_ELF32)
8508 return (decl != NULL_TREE);
8510 /* Sibcalls are not ok because the arg pointer register is not a fixed
8511 register. This prevents the sibcall optimization from occurring. In
8512 addition, there are problems with stub placement using GNU ld. This
8513 is because a normal sibcall branch uses a 17-bit relocation while
8514 a regular call branch uses a 22-bit relocation. As a result, more
8515 care needs to be taken in the placement of long-branch stubs. */
8516 if (TARGET_64BIT)
8517 return false;
8519 /* Sibcalls are only ok within a translation unit. */
8520 return (decl && !TREE_PUBLIC (decl));
8523 /* ??? Addition is not commutative on the PA due to the weird implicit
8524 space register selection rules for memory addresses. Therefore, we
8525 don't consider a + b == b + a, as this might be inside a MEM. */
8526 static bool
8527 pa_commutative_p (const_rtx x, int outer_code)
8529 return (COMMUTATIVE_P (x)
8530 && (TARGET_NO_SPACE_REGS
8531 || (outer_code != UNKNOWN && outer_code != MEM)
8532 || GET_CODE (x) != PLUS));
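/* For illustration (editor's note): in a memory address the space
   register is selected from the base operand, so within a MEM
   (plus (reg %r25) (reg %r26)) and (plus (reg %r26) (reg %r25)) can
   refer to different spaces; hence PLUS is only treated as
   commutative when space registers are disabled or the context is
   known not to be a MEM.  */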
8535 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8536 use in fmpyadd instructions. */
8537 int
8538 pa_fmpyaddoperands (rtx *operands)
8540 enum machine_mode mode = GET_MODE (operands[0]);
8542 /* Must be a floating point mode. */
8543 if (mode != SFmode && mode != DFmode)
8544 return 0;
8546 /* All modes must be the same. */
8547 if (! (mode == GET_MODE (operands[1])
8548 && mode == GET_MODE (operands[2])
8549 && mode == GET_MODE (operands[3])
8550 && mode == GET_MODE (operands[4])
8551 && mode == GET_MODE (operands[5])))
8552 return 0;
8554 /* All operands must be registers. */
8555 if (! (GET_CODE (operands[1]) == REG
8556 && GET_CODE (operands[2]) == REG
8557 && GET_CODE (operands[3]) == REG
8558 && GET_CODE (operands[4]) == REG
8559 && GET_CODE (operands[5]) == REG))
8560 return 0;
8562 /* Only 2 real operands to the addition. One of the input operands must
8563 be the same as the output operand. */
8564 if (! rtx_equal_p (operands[3], operands[4])
8565 && ! rtx_equal_p (operands[3], operands[5]))
8566 return 0;
8568 /* Inout operand of add cannot conflict with any operands from multiply. */
8569 if (rtx_equal_p (operands[3], operands[0])
8570 || rtx_equal_p (operands[3], operands[1])
8571 || rtx_equal_p (operands[3], operands[2]))
8572 return 0;
8574 /* multiply cannot feed into addition operands. */
8575 if (rtx_equal_p (operands[4], operands[0])
8576 || rtx_equal_p (operands[5], operands[0]))
8577 return 0;
8579 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8580 if (mode == SFmode
8581 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8582 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8583 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8584 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8585 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8586 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8587 return 0;
8589 /* Passed. Operands are suitable for fmpyadd. */
8590 return 1;
8593 #if !defined(USE_COLLECT2)
8594 static void
8595 pa_asm_out_constructor (rtx symbol, int priority)
8597 if (!function_label_operand (symbol, VOIDmode))
8598 pa_encode_label (symbol);
8600 #ifdef CTORS_SECTION_ASM_OP
8601 default_ctor_section_asm_out_constructor (symbol, priority);
8602 #else
8603 # ifdef TARGET_ASM_NAMED_SECTION
8604 default_named_section_asm_out_constructor (symbol, priority);
8605 # else
8606 default_stabs_asm_out_constructor (symbol, priority);
8607 # endif
8608 #endif
8611 static void
8612 pa_asm_out_destructor (rtx symbol, int priority)
8614 if (!function_label_operand (symbol, VOIDmode))
8615 pa_encode_label (symbol);
8617 #ifdef DTORS_SECTION_ASM_OP
8618 default_dtor_section_asm_out_destructor (symbol, priority);
8619 #else
8620 # ifdef TARGET_ASM_NAMED_SECTION
8621 default_named_section_asm_out_destructor (symbol, priority);
8622 # else
8623 default_stabs_asm_out_destructor (symbol, priority);
8624 # endif
8625 #endif
8627 #endif
8629 /* This function places uninitialized global data in the bss section.
8630 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8631 function on the SOM port to prevent uninitialized global data from
8632 being placed in the data section. */
8634 void
8635 pa_asm_output_aligned_bss (FILE *stream,
8636 const char *name,
8637 unsigned HOST_WIDE_INT size,
8638 unsigned int align)
8640 switch_to_section (bss_section);
8641 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8643 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8644 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8645 #endif
8647 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8648 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8649 #endif
8651 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8652 ASM_OUTPUT_LABEL (stream, name);
8653 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8656 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8657 that doesn't allow the alignment of global common storage to be directly
8658 specified. The SOM linker aligns common storage based on the rounded
8659 value of the NUM_BYTES parameter in the .comm directive. It's not
8660 possible to use the .align directive as it doesn't affect the alignment
8661 of the label associated with a .comm directive. */
8663 void
8664 pa_asm_output_aligned_common (FILE *stream,
8665 const char *name,
8666 unsigned HOST_WIDE_INT size,
8667 unsigned int align)
8669 unsigned int max_common_align;
8671 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8672 if (align > max_common_align)
8674 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8675 "for global common data. Using %u",
8676 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8677 align = max_common_align;
8680 switch_to_section (bss_section);
8682 assemble_name (stream, name);
8683 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8684 MAX (size, align / BITS_PER_UNIT));
8687 /* We can't use .comm for local common storage as the SOM linker effectively
8688 treats the symbol as universal and uses the same storage for local symbols
8689 with the same name in different object files. The .block directive
8690 reserves an uninitialized block of storage. However, it's not common
8691 storage. Fortunately, GCC never requests common storage with the same
8692 name in any given translation unit. */
8694 void
8695 pa_asm_output_aligned_local (FILE *stream,
8696 const char *name,
8697 unsigned HOST_WIDE_INT size,
8698 unsigned int align)
8700 switch_to_section (bss_section);
8701 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8703 #ifdef LOCAL_ASM_OP
8704 fprintf (stream, "%s", LOCAL_ASM_OP);
8705 assemble_name (stream, name);
8706 fprintf (stream, "\n");
8707 #endif
8709 ASM_OUTPUT_LABEL (stream, name);
8710 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8713 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8714 use in fmpysub instructions. */
8715 int
8716 pa_fmpysuboperands (rtx *operands)
8718 enum machine_mode mode = GET_MODE (operands[0]);
8720 /* Must be a floating point mode. */
8721 if (mode != SFmode && mode != DFmode)
8722 return 0;
8724 /* All modes must be the same. */
8725 if (! (mode == GET_MODE (operands[1])
8726 && mode == GET_MODE (operands[2])
8727 && mode == GET_MODE (operands[3])
8728 && mode == GET_MODE (operands[4])
8729 && mode == GET_MODE (operands[5])))
8730 return 0;
8732 /* All operands must be registers. */
8733 if (! (GET_CODE (operands[1]) == REG
8734 && GET_CODE (operands[2]) == REG
8735 && GET_CODE (operands[3]) == REG
8736 && GET_CODE (operands[4]) == REG
8737 && GET_CODE (operands[5]) == REG))
8738 return 0;
8740 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8741 operation, so operands[4] must be the same as operands[3]. */
8742 if (! rtx_equal_p (operands[3], operands[4]))
8743 return 0;
8745 /* multiply cannot feed into subtraction. */
8746 if (rtx_equal_p (operands[5], operands[0]))
8747 return 0;
8749 /* Inout operand of sub cannot conflict with any operands from multiply. */
8750 if (rtx_equal_p (operands[3], operands[0])
8751 || rtx_equal_p (operands[3], operands[1])
8752 || rtx_equal_p (operands[3], operands[2]))
8753 return 0;
8755 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8756 if (mode == SFmode
8757 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8758 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8759 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8760 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8761 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8762 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8763 return 0;
8765 /* Passed. Operands are suitable for fmpysub. */
8766 return 1;
8769 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8770 constants for shadd instructions. */
8771 int
8772 pa_shadd_constant_p (int val)
8774 if (val == 2 || val == 4 || val == 8)
8775 return 1;
8776 else
8777 return 0;
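/* For illustration (editor's note): 2, 4 and 8 are the scale factors
   of the sh1add, sh2add and sh3add instructions, which compute
   dest = (x << k) + y for k = 1, 2, 3, e.g.

       sh2add %r4,%r5,%r6    ; %r6 = 4 * %r4 + %r5  */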
8780 /* Return TRUE if INSN branches forward. */
8782 static bool
8783 forward_branch_p (rtx insn)
8785 rtx lab = JUMP_LABEL (insn);
8787 /* The INSN must have a jump label. */
8788 gcc_assert (lab != NULL_RTX);
8790 if (INSN_ADDRESSES_SET_P ())
8791 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8793 while (insn)
8795 if (insn == lab)
8796 return true;
8797 else
8798 insn = NEXT_INSN (insn);
8801 return false;
8804 /* Return 1 if INSN is in the delay slot of a call instruction. */
8805 int
8806 pa_jump_in_call_delay (rtx insn)
8809 if (! JUMP_P (insn))
8810 return 0;
8812 if (PREV_INSN (insn)
8813 && PREV_INSN (PREV_INSN (insn))
8814 && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
8816 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8818 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8819 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8822 else
8823 return 0;
8826 /* Output an unconditional move and branch insn. */
8828 const char *
8829 pa_output_parallel_movb (rtx *operands, rtx insn)
8831 int length = get_attr_length (insn);
8833 /* These are the cases in which we win. */
8834 if (length == 4)
8835 return "mov%I1b,tr %1,%0,%2";
8837 /* None of the following cases win, but they don't lose either. */
8838 if (length == 8)
8840 if (dbr_sequence_length () == 0)
8842 /* Nothing in the delay slot, fake it by putting the combined
8843 insn (the ldi or copy) in the delay slot of a b. */
8844 if (GET_CODE (operands[1]) == CONST_INT)
8845 return "b %2\n\tldi %1,%0";
8846 else
8847 return "b %2\n\tcopy %1,%0";
8849 else
8851 /* Something in the delay slot, but we've got a long branch. */
8852 if (GET_CODE (operands[1]) == CONST_INT)
8853 return "ldi %1,%0\n\tb %2";
8854 else
8855 return "copy %1,%0\n\tb %2";
8859 if (GET_CODE (operands[1]) == CONST_INT)
8860 output_asm_insn ("ldi %1,%0", operands);
8861 else
8862 output_asm_insn ("copy %1,%0", operands);
8863 return pa_output_lbranch (operands[2], insn, 1);
8866 /* Output an unconditional add and branch insn. */
8868 const char *
8869 pa_output_parallel_addb (rtx *operands, rtx insn)
8871 int length = get_attr_length (insn);
8873 /* To make life easy we want operand0 to be the shared input/output
8874 operand and operand1 to be the readonly operand. */
8875 if (operands[0] == operands[1])
8876 operands[1] = operands[2];
8878 /* These are the cases in which we win. */
8879 if (length == 4)
8880 return "add%I1b,tr %1,%0,%3";
8882 /* None of the following cases win, but they don't lose either. */
8883 if (length == 8)
8885 if (dbr_sequence_length () == 0)
8886 /* Nothing in the delay slot, fake it by putting the combined
8887 insn (the copy or add) in the delay slot of a bl. */
8888 return "b %3\n\tadd%I1 %1,%0,%0";
8889 else
8890 /* Something in the delay slot, but we've got a long branch. */
8891 return "add%I1 %1,%0,%0\n\tb %3";
8894 output_asm_insn ("add%I1 %1,%0,%0", operands);
8895 return pa_output_lbranch (operands[3], insn, 1);
8898 /* Return nonzero if INSN (a jump insn) immediately follows a call
8899 to a named function. This is used to avoid filling the delay slot
8900 of the jump since it can usually be eliminated by modifying RP in
8901 the delay slot of the call. */
8903 int
8904 pa_following_call (rtx insn)
8906 if (! TARGET_JUMP_IN_DELAY)
8907 return 0;
8909 /* Find the previous real insn, skipping NOTEs. */
8910 insn = PREV_INSN (insn);
8911 while (insn && NOTE_P (insn))
8912 insn = PREV_INSN (insn);
8914 /* Check for CALL_INSNs and millicode calls. */
8915 if (insn
8916 && ((CALL_P (insn)
8917 && get_attr_type (insn) != TYPE_DYNCALL)
8918 || (NONJUMP_INSN_P (insn)
8919 && GET_CODE (PATTERN (insn)) != SEQUENCE
8920 && GET_CODE (PATTERN (insn)) != USE
8921 && GET_CODE (PATTERN (insn)) != CLOBBER
8922 && get_attr_type (insn) == TYPE_MILLI)))
8923 return 1;
8925 return 0;
8928 /* We use this hook to perform a PA specific optimization which is difficult
8929 to do in earlier passes.
8931 We surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8932 insns. Those insns mark where we should emit .begin_brtab and
8933 .end_brtab directives when using GAS. This allows for better link
8934 time optimizations. */
8936 static void
8937 pa_reorg (void)
8939 rtx insn;
8941 remove_useless_addtr_insns (1);
8943 if (pa_cpu < PROCESSOR_8000)
8944 pa_combine_instructions ();
8946 /* Still need brtab marker insns. FIXME: the presence of these
8947 markers disables output of the branch table to readonly memory,
8948 and any alignment directives that might be needed. Possibly,
8949 the begin_brtab insn should be output before the label for the
8950 table. This doesn't matter at the moment since the tables are
8951 always output in the text section. */
8952 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8954 /* Find an ADDR_VEC insn. */
8955 if (! JUMP_TABLE_DATA_P (insn))
8956 continue;
8958 /* Now generate markers for the beginning and end of the
8959 branch table. */
8960 emit_insn_before (gen_begin_brtab (), insn);
8961 emit_insn_after (gen_end_brtab (), insn);
8965 /* The PA has a number of odd instructions which can perform multiple
8966 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8967 it may be profitable to combine two instructions into one instruction
8968 with two outputs. It's not profitable on PA2.0 machines because the
8969 two outputs would take two slots in the reorder buffers.
8971 This routine finds instructions which can be combined and combines
8972 them. We only support some of the potential combinations, and we
8973 only try common ways to find suitable instructions.
8975 * addb can add two registers or a register and a small integer
8976 and jump to a nearby (+-8k) location. Normally the jump to the
8977 nearby location is conditional on the result of the add, but by
8978 using the "true" condition we can make the jump unconditional.
8979 Thus addb can perform two independent operations in one insn.
8981 * movb is similar to addb in that it can perform a reg->reg
8982 or small immediate->reg copy and jump to a nearby (+-8k) location.
8984 * fmpyadd and fmpysub can perform an FP multiply and either an
8985 FP add or FP sub if the operands of the multiply and add/sub are
8986 independent (there are other minor restrictions). Note both
8987 the fmpy and fadd/fsub can in theory move to better spots according
8988 to data dependencies, but for now we require the fmpy stay at a
8989 fixed location.
8991 * Many of the memory operations can perform pre & post updates
8992 of index registers. GCC's pre/post increment/decrement addressing
8993 is far too simple to take advantage of all the possibilities. This
8994 pass may not be suitable since those insns may not be independent.
8996 * comclr can compare two integer registers or an immediate and a register, nullify
8997 the following instruction and zero some other register. This
8998 is more difficult to use as it's harder to find an insn which
8999 will generate a comclr than finding something like an unconditional
9000 branch. (conditional moves & long branches create comclr insns).
9002 * Most arithmetic operations can conditionally skip the next
9003 instruction. They can be viewed as "perform this operation
9004 and conditionally jump to this nearby location" (where nearby
9005 is a few insns away). These are difficult to use due to the
9006 branch length restrictions. */
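/* For illustration (editor's sketch; register choices are made up and
   the operand order is schematic): the fmpy/fadd case aims to rewrite

       fmpy,dbl %fr4,%fr5,%fr6
       fadd,dbl %fr7,%fr8,%fr8

   as the single two-output insn

       fmpyadd,dbl %fr4,%fr5,%fr6,%fr7,%fr8   ; %fr6 = %fr4 * %fr5
                                              ; %fr8 = %fr7 + %fr8

   when the operand independence rules checked by pa_fmpyaddoperands
   hold.  */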
9008 static void
9009 pa_combine_instructions (void)
9011 rtx anchor, new_rtx;
9013 /* This can get expensive since the basic algorithm is on the
9014 order of O(n^2) (or worse). Only do it for -O2 or higher
9015 levels of optimization. */
9016 if (optimize < 2)
9017 return;
9019 /* Walk down the list of insns looking for "anchor" insns which
9020 may be combined with "floating" insns. As the name implies,
9021 "anchor" instructions don't move, while "floating" insns may
9022 move around. */
9023 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9024 new_rtx = make_insn_raw (new_rtx);
9026 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9028 enum attr_pa_combine_type anchor_attr;
9029 enum attr_pa_combine_type floater_attr;
9031 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9032 Also ignore any special USE insns. */
9033 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9034 || GET_CODE (PATTERN (anchor)) == USE
9035 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9036 continue;
9038 anchor_attr = get_attr_pa_combine_type (anchor);
9039 /* See if anchor is an insn suitable for combination. */
9040 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9041 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9042 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9043 && ! forward_branch_p (anchor)))
9045 rtx floater;
9047 for (floater = PREV_INSN (anchor);
9048 floater;
9049 floater = PREV_INSN (floater))
9051 if (NOTE_P (floater)
9052 || (NONJUMP_INSN_P (floater)
9053 && (GET_CODE (PATTERN (floater)) == USE
9054 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9055 continue;
9057 /* Anything except a regular INSN will stop our search. */
9058 if (! NONJUMP_INSN_P (floater))
9060 floater = NULL_RTX;
9061 break;
9064 /* See if FLOATER is suitable for combination with the
9065 anchor. */
9066 floater_attr = get_attr_pa_combine_type (floater);
9067 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9068 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9069 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9070 && floater_attr == PA_COMBINE_TYPE_FMPY))
9072 /* If ANCHOR and FLOATER can be combined, then we're
9073 done with this pass. */
9074 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9075 SET_DEST (PATTERN (floater)),
9076 XEXP (SET_SRC (PATTERN (floater)), 0),
9077 XEXP (SET_SRC (PATTERN (floater)), 1)))
9078 break;
9081 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9082 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9084 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9086 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9087 SET_DEST (PATTERN (floater)),
9088 XEXP (SET_SRC (PATTERN (floater)), 0),
9089 XEXP (SET_SRC (PATTERN (floater)), 1)))
9090 break;
9092 else
9094 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9095 SET_DEST (PATTERN (floater)),
9096 SET_SRC (PATTERN (floater)),
9097 SET_SRC (PATTERN (floater))))
9098 break;
9103 /* If we didn't find anything on the backwards scan try forwards. */
9104 if (!floater
9105 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9106 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9108 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9110 if (NOTE_P (floater)
9111 || (NONJUMP_INSN_P (floater)
9112 && (GET_CODE (PATTERN (floater)) == USE
9113 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9115 continue;
9117 /* Anything except a regular INSN will stop our search. */
9118 if (! NONJUMP_INSN_P (floater))
9120 floater = NULL_RTX;
9121 break;
9124 /* See if FLOATER is suitable for combination with the
9125 anchor. */
9126 floater_attr = get_attr_pa_combine_type (floater);
9127 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9128 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9129 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9130 && floater_attr == PA_COMBINE_TYPE_FMPY))
9132 /* If ANCHOR and FLOATER can be combined, then we're
9133 done with this pass. */
9134 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9135 SET_DEST (PATTERN (floater)),
9136 XEXP (SET_SRC (PATTERN (floater)),
9137 0),
9138 XEXP (SET_SRC (PATTERN (floater)),
9139 1)))
9140 break;
9145 /* FLOATER will be nonzero if we found a suitable floating
9146 insn for combination with ANCHOR. */
9147 if (floater
9148 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9149 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9151 /* Emit the new instruction and delete the old anchor. */
9152 emit_insn_before (gen_rtx_PARALLEL
9153 (VOIDmode,
9154 gen_rtvec (2, PATTERN (anchor),
9155 PATTERN (floater))),
9156 anchor);
9158 SET_INSN_DELETED (anchor);
9160 /* Emit a special USE insn for FLOATER, then delete
9161 the floating insn. */
9162 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9163 delete_insn (floater);
9165 continue;
9167 else if (floater
9168 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9170 rtx temp;
9171 /* Emit the new_jump instruction and delete the old anchor. */
9172 temp
9173 = emit_jump_insn_before (gen_rtx_PARALLEL
9174 (VOIDmode,
9175 gen_rtvec (2, PATTERN (anchor),
9176 PATTERN (floater))),
9177 anchor);
9179 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9180 SET_INSN_DELETED (anchor);
9182 /* Emit a special USE insn for FLOATER, then delete
9183 the floating insn. */
9184 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9185 delete_insn (floater);
9186 continue;
9192 static int
9193 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9194 rtx src1, rtx src2)
9196 int insn_code_number;
9197 rtx start, end;
9199 /* Create a PARALLEL with the patterns of ANCHOR and
9200 FLOATER, try to recognize it, then test constraints
9201 for the resulting pattern.
9203 If the pattern doesn't match or the constraints
9204 aren't met keep searching for a suitable floater
9205 insn. */
9206 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9207 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9208 INSN_CODE (new_rtx) = -1;
9209 insn_code_number = recog_memoized (new_rtx);
9210 if (insn_code_number < 0
9211 || (extract_insn (new_rtx), ! constrain_operands (1)))
9212 return 0;
9214 if (reversed)
9216 start = anchor;
9217 end = floater;
9219 else
9221 start = floater;
9222 end = anchor;
9225 /* There are up to three operands to consider. One
9226 output and two inputs.
9228 The output must not be used between FLOATER & ANCHOR
9229 exclusive. The inputs must not be set between
9230 FLOATER and ANCHOR exclusive. */
9232 if (reg_used_between_p (dest, start, end))
9233 return 0;
9235 if (reg_set_between_p (src1, start, end))
9236 return 0;
9238 if (reg_set_between_p (src2, start, end))
9239 return 0;
9241 /* If we get here, then everything is good. */
9242 return 1;
9245 /* Return nonzero if references for INSN are delayed.
9247 Millicode insns are actually function calls with some special
9248 constraints on arguments and register usage.
9250 Millicode calls always expect their arguments in the integer argument
9251 registers, and always return their result in %r29 (ret1). They
9252 are expected to clobber their arguments, %r1, %r29, and the return
9253 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9255 This function tells reorg that a millicode call's references to its
9256 arguments do not appear to happen until after the millicode call.
9257 This allows reorg to put insns which set the argument registers into the
9258 delay slot of the millicode call -- thus they act more like traditional
9259 CALL_INSNs.
9261 Note we cannot consider side effects of the insn to be delayed because
9262 the branch and link insn will clobber the return pointer. If we happened
9263 to use the return pointer in the delay slot of the call, then we lose.
9265 get_attr_type will try to recognize the given insn, so make sure to
9266 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9267 in particular. */
9268 int
9269 pa_insn_refs_are_delayed (rtx insn)
9271 return ((NONJUMP_INSN_P (insn)
9272 && GET_CODE (PATTERN (insn)) != SEQUENCE
9273 && GET_CODE (PATTERN (insn)) != USE
9274 && GET_CODE (PATTERN (insn)) != CLOBBER
9275 && get_attr_type (insn) == TYPE_MILLI));
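/* For illustration (editor's note; $$mulI is one of the libgcc
   millicode entries, with arguments in %r26/%r25 and the result in
   %r29): treating the argument references as delayed lets reorg
   produce

       bl $$mulI,%r31
       ldi 42,%r25          ; argument set up in the delay slot

   which would be rejected if the bl were thought to read %r25 before
   its delay slot executes.  */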
9278 /* Promote the return value, but not the arguments. */
9280 static enum machine_mode
9281 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9282 enum machine_mode mode,
9283 int *punsignedp ATTRIBUTE_UNUSED,
9284 const_tree fntype ATTRIBUTE_UNUSED,
9285 int for_return)
9287 if (for_return == 0)
9288 return mode;
9289 return promote_mode (type, mode, punsignedp);
9292 /* On the HP-PA the value is found in register(s) 28(-29), unless
9293 the mode is SF or DF. Then the value is returned in fr4 (32).
9295 This must perform the same promotions as PROMOTE_MODE, else promoting
9296 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9298 Small structures must be returned in a PARALLEL on PA64 in order
9299 to match the HP Compiler ABI. */
9301 static rtx
9302 pa_function_value (const_tree valtype,
9303 const_tree func ATTRIBUTE_UNUSED,
9304 bool outgoing ATTRIBUTE_UNUSED)
9306 enum machine_mode valmode;
9308 if (AGGREGATE_TYPE_P (valtype)
9309 || TREE_CODE (valtype) == COMPLEX_TYPE
9310 || TREE_CODE (valtype) == VECTOR_TYPE)
9312 if (TARGET_64BIT)
9314 /* Aggregates with a size less than or equal to 128 bits are
9315 returned in GR 28(-29). They are left justified. The pad
9316 bits are undefined. Larger aggregates are returned in
9317 memory. */
9318 rtx loc[2];
9319 int i, offset = 0;
9320 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9322 for (i = 0; i < ub; i++)
9324 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9325 gen_rtx_REG (DImode, 28 + i),
9326 GEN_INT (offset));
9327 offset += 8;
9330 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9332 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9334 /* Aggregates 5 to 8 bytes in size are returned in general
9335 registers r28-r29 in the same manner as other non
9336 floating-point objects. The data is right-justified and
9337 zero-extended to 64 bits. This is opposite to the normal
9338 justification used on big endian targets and requires
9339 special treatment. */
9340 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9341 gen_rtx_REG (DImode, 28), const0_rtx);
9342 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9346 if ((INTEGRAL_TYPE_P (valtype)
9347 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9348 || POINTER_TYPE_P (valtype))
9349 valmode = word_mode;
9350 else
9351 valmode = TYPE_MODE (valtype);
9353 if (TREE_CODE (valtype) == REAL_TYPE
9354 && !AGGREGATE_TYPE_P (valtype)
9355 && TYPE_MODE (valtype) != TFmode
9356 && !TARGET_SOFT_FLOAT)
9357 return gen_rtx_REG (valmode, 32);
9359 return gen_rtx_REG (valmode, 28);
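/* For illustration (editor's note), the placements implied above: an
   int is returned in %r28 (widened to word_mode), a double in %fr4
   (register 32) unless soft float is in effect, and on TARGET_64BIT
   a 16-byte struct comes back as a PARALLEL over %r28/%r29 with the
   data left justified.  */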
9362 /* Implement the TARGET_LIBCALL_VALUE hook. */
9364 static rtx
9365 pa_libcall_value (enum machine_mode mode,
9366 const_rtx fun ATTRIBUTE_UNUSED)
9368 if (! TARGET_SOFT_FLOAT
9369 && (mode == SFmode || mode == DFmode))
9370 return gen_rtx_REG (mode, 32);
9371 else
9372 return gen_rtx_REG (mode, 28);
9375 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9377 static bool
9378 pa_function_value_regno_p (const unsigned int regno)
9380 if (regno == 28
9381 || (! TARGET_SOFT_FLOAT && regno == 32))
9382 return true;
9384 return false;
9387 /* Update the data in CUM to advance over an argument
9388 of mode MODE and data type TYPE.
9389 (TYPE is null for libcalls where that information may not be available.) */
9391 static void
9392 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9393 const_tree type, bool named ATTRIBUTE_UNUSED)
9395 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9396 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9398 cum->nargs_prototype--;
9399 cum->words += (arg_size
9400 + ((cum->words & 01)
9401 && type != NULL_TREE
9402 && arg_size > 1));
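/* Worked example (editor's note): with cum->words == 1 and a DFmode
   argument (arg_size == 2, non-null type), the padding term is 1, so
   cum->words advances to 1 + 2 + 1 == 4; a SImode argument
   (arg_size == 1) would advance it to 2 with no padding.  */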
9405 /* Return the location of a parameter that is passed in a register or NULL
9406 if the parameter has any component that is passed in memory.
9408 This is new code and will be pushed into the net sources after
9409 further testing.
9411 ??? We might want to restructure this so that it looks more like other
9412 ports. */
9413 static rtx
9414 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9415 const_tree type, bool named ATTRIBUTE_UNUSED)
9417 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9418 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9419 int alignment = 0;
9420 int arg_size;
9421 int fpr_reg_base;
9422 int gpr_reg_base;
9423 rtx retval;
9425 if (mode == VOIDmode)
9426 return NULL_RTX;
9428 arg_size = FUNCTION_ARG_SIZE (mode, type);
9430 /* If this arg would be passed partially or totally on the stack, then
9431 this routine should return zero. pa_arg_partial_bytes will
9432 handle arguments which are split between regs and stack slots if
9433 the ABI mandates split arguments. */
9434 if (!TARGET_64BIT)
9436 /* The 32-bit ABI does not split arguments. */
9437 if (cum->words + arg_size > max_arg_words)
9438 return NULL_RTX;
9440 else
9442 if (arg_size > 1)
9443 alignment = cum->words & 1;
9444 if (cum->words + alignment >= max_arg_words)
9445 return NULL_RTX;
9448 /* The 32bit ABIs and the 64bit ABIs are rather different,
9449 particularly in their handling of FP registers. We might
9450 be able to cleverly share code between them, but I'm not
9451 going to bother in the hope that splitting them up results
9452 in code that is more easily understood. */
9454 if (TARGET_64BIT)
9456 /* Advance the base registers to their current locations.
9458 Remember, gprs grow towards smaller register numbers while
9459 fprs grow to higher register numbers. Also remember that
9460 although FP regs are 32-bit addressable, we pretend that
9461 the registers are 64 bits wide. */
9462 gpr_reg_base = 26 - cum->words;
9463 fpr_reg_base = 32 + cum->words;
9465 /* Arguments wider than one word and small aggregates need special
9466 treatment. */
9467 if (arg_size > 1
9468 || mode == BLKmode
9469 || (type && (AGGREGATE_TYPE_P (type)
9470 || TREE_CODE (type) == COMPLEX_TYPE
9471 || TREE_CODE (type) == VECTOR_TYPE)))
9473 /* Double-extended precision (80-bit), quad-precision (128-bit)
9474 and aggregates including complex numbers are aligned on
9475 128-bit boundaries. The first eight 64-bit argument slots
9476 are associated one-to-one with general registers r26
9477 through r19, and also with floating-point registers fr4
9478 through fr11. Arguments larger than one word are always
9479 passed in general registers.
9481 Using a PARALLEL with a word mode register results in left
9482 justified data on a big-endian target. */
9484 rtx loc[8];
9485 int i, offset = 0, ub = arg_size;
9487 /* Align the base register. */
9488 gpr_reg_base -= alignment;
9490 ub = MIN (ub, max_arg_words - cum->words - alignment);
9491 for (i = 0; i < ub; i++)
9493 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9494 gen_rtx_REG (DImode, gpr_reg_base),
9495 GEN_INT (offset));
9496 gpr_reg_base -= 1;
9497 offset += 8;
9500 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9503 else
9505 /* If the argument is larger than a word, then we know precisely
9506 which registers we must use. */
9507 if (arg_size > 1)
9509 if (cum->words)
9511 gpr_reg_base = 23;
9512 fpr_reg_base = 38;
9514 else
9516 gpr_reg_base = 25;
9517 fpr_reg_base = 34;
9520 /* Structures 5 to 8 bytes in size are passed in the general
9521 registers in the same manner as other non floating-point
9522 objects. The data is right-justified and zero-extended
9523 to 64 bits. This is opposite to the normal justification
9524 used on big endian targets and requires special treatment.
9525 We now define BLOCK_REG_PADDING to pad these objects.
9526 Aggregates, complex and vector types are passed in the same
9527 manner as structures. */
9528 if (mode == BLKmode
9529 || (type && (AGGREGATE_TYPE_P (type)
9530 || TREE_CODE (type) == COMPLEX_TYPE
9531 || TREE_CODE (type) == VECTOR_TYPE)))
9533 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9534 gen_rtx_REG (DImode, gpr_reg_base),
9535 const0_rtx);
9536 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9539 else
9541 /* We have a single word (32 bits). A simple computation
9542 will get us the register #s we need. */
9543 gpr_reg_base = 26 - cum->words;
9544 fpr_reg_base = 32 + 2 * cum->words;
9548 /* Determine if the argument needs to be passed in both general and
9549 floating point registers. */
9550 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9551 /* If we are doing soft-float with portable runtime, then there
9552 is no need to worry about FP regs. */
9553 && !TARGET_SOFT_FLOAT
9554 /* The parameter must be some kind of scalar float, else we just
9555 pass it in integer registers. */
9556 && GET_MODE_CLASS (mode) == MODE_FLOAT
9557 /* The target function must not have a prototype. */
9558 && cum->nargs_prototype <= 0
9559 /* libcalls do not need to pass items in both FP and general
9560 registers. */
9561 && type != NULL_TREE
9562 /* All this hair applies to "outgoing" args only. This includes
9563 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9564 && !cum->incoming)
9565 /* Also pass outgoing floating arguments in both registers in indirect
9566 calls with the 32-bit ABI and the HP assembler since there is no
9567 way to specify argument locations in static functions. */
9568 || (!TARGET_64BIT
9569 && !TARGET_GAS
9570 && !cum->incoming
9571 && cum->indirect
9572 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9574 retval
9575 = gen_rtx_PARALLEL
9576 (mode,
9577 gen_rtvec (2,
9578 gen_rtx_EXPR_LIST (VOIDmode,
9579 gen_rtx_REG (mode, fpr_reg_base),
9580 const0_rtx),
9581 gen_rtx_EXPR_LIST (VOIDmode,
9582 gen_rtx_REG (mode, gpr_reg_base),
9583 const0_rtx)));
9585 else
9587 /* See if we should pass this parameter in a general register. */
9588 if (TARGET_SOFT_FLOAT
9589 /* Indirect calls in the normal 32bit ABI require all arguments
9590 to be passed in general registers. */
9591 || (!TARGET_PORTABLE_RUNTIME
9592 && !TARGET_64BIT
9593 && !TARGET_ELF32
9594 && cum->indirect)
9595 /* If the parameter is not a scalar floating-point parameter,
9596 then it belongs in GPRs. */
9597 || GET_MODE_CLASS (mode) != MODE_FLOAT
9598 /* Structure with single SFmode field belongs in GPR. */
9599 || (type && AGGREGATE_TYPE_P (type)))
9600 retval = gen_rtx_REG (mode, gpr_reg_base);
9601 else
9602 retval = gen_rtx_REG (mode, fpr_reg_base);
9604 return retval;
9605 }
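/* Worked example of the 32-bit register selection above (assuming a
   hypothetical prototyped direct call):

     void f (int a, double b);

   a is the first word, so it is passed in GPR hard register 26
   (26 - cum->words with cum->words == 0).  For b, arg_size is 2 and
   cum->words is then 1, so gpr_reg_base/fpr_reg_base become 23/38;
   b is a scalar float in a prototyped, direct call, so the final
   test selects the FP side and b lands in FP hard register 38.  */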
9607 /* Arguments larger than one word are double-word aligned. */
9609 static unsigned int
9610 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9612 bool singleword = (type
9613 ? (integer_zerop (TYPE_SIZE (type))
9614 || !TREE_CONSTANT (TYPE_SIZE (type))
9615 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9616 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9618 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9619 }
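/* For instance (illustrative): an int argument fits in one word and
   gets PARM_BOUNDARY alignment, while a double or a 16-byte struct
   gets MAX_PARM_BOUNDARY.  Zero-sized and variable-sized types are
   deliberately treated as single words by the checks above.  */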
9621 /* If this arg would be passed totally in registers or totally on the stack,
9622 then this routine should return zero. */
9624 static int
9625 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9626 tree type, bool named ATTRIBUTE_UNUSED)
9628 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9629 unsigned int max_arg_words = 8;
9630 unsigned int offset = 0;
9632 if (!TARGET_64BIT)
9633 return 0;
9635 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9636 offset = 1;
9638 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9639 /* Arg fits fully into registers. */
9640 return 0;
9641 else if (cum->words + offset >= max_arg_words)
9642 /* Arg fully on the stack. */
9643 return 0;
9644 else
9645 /* Arg is split. */
9646 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9647 }
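/* Worked example (illustrative, 64-bit only): with cum->words == 7
   and a two-word aggregate, the odd starting slot makes offset 1,
   and 7 + 1 >= 8 means the argument goes entirely on the stack
   (return 0).  With cum->words == 6 and a four-word aggregate,
   6 + 0 + 4 > 8 while 6 < 8, so the argument is split and
   (8 - 6) * UNITS_PER_WORD bytes are passed in registers.  */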
9650 /* A get_unnamed_section callback for switching to the text section.
9652 This function is only used with SOM. Because we don't support
9653 named subspaces, we can only create a new subspace or switch back
9654 to the default text subspace. */
9656 static void
9657 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9659 gcc_assert (TARGET_SOM);
9660 if (TARGET_GAS)
9662 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9664 /* We only want to emit a .nsubspa directive once at the
9665 start of the function. */
9666 cfun->machine->in_nsubspa = 1;
9668 /* Create a new subspace for the text. This provides
9669 better stub placement and one-only functions. */
9670 if (cfun->decl
9671 && DECL_ONE_ONLY (cfun->decl)
9672 && !DECL_WEAK (cfun->decl))
9674 output_section_asm_op ("\t.SPACE $TEXT$\n"
9675 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9676 "ACCESS=44,SORT=24,COMDAT");
9677 return;
9680 else
9682 /* Either there isn't a current function, or the body of the current
9683 function has been completed. So, we are changing to the
9684 text section to output debugging information. Thus, we
9685 need to forget that we are in the text section so that
9686 varasm.c will call us when text_section is selected again. */
9687 gcc_assert (!cfun || !cfun->machine
9688 || cfun->machine->in_nsubspa == 2);
9689 in_section = NULL;
9691 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9692 return;
9694 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9697 /* A get_unnamed_section callback for switching to comdat data
9698 sections. This function is only used with SOM. */
9700 static void
9701 som_output_comdat_data_section_asm_op (const void *data)
9703 in_section = NULL;
9704 output_section_asm_op (data);
9707 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9709 static void
9710 pa_som_asm_init_sections (void)
9712 text_section
9713 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9715 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9716 is not being generated. */
9717 som_readonly_data_section
9718 = get_unnamed_section (0, output_section_asm_op,
9719 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9721 /* When secondary definitions are not supported, SOM makes readonly
9722 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9723 the comdat flag. */
9724 som_one_only_readonly_data_section
9725 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9726 "\t.SPACE $TEXT$\n"
9727 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9728 "ACCESS=0x2c,SORT=16,COMDAT");
9731 /* When secondary definitions are not supported, SOM makes data one-only
9732 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9733 som_one_only_data_section
9734 = get_unnamed_section (SECTION_WRITE,
9735 som_output_comdat_data_section_asm_op,
9736 "\t.SPACE $PRIVATE$\n"
9737 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9738 "ACCESS=31,SORT=24,COMDAT");
9740 if (flag_tm)
9741 som_tm_clone_table_section
9742 = get_unnamed_section (0, output_section_asm_op,
9743 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9745 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9746 which reference data within the $TEXT$ space (for example constant
9747 strings in the $LIT$ subspace).
9749 The assemblers (GAS and HP as) both have problems with handling
9750 the difference of two symbols, which is the other correct way to
9751 reference constant data during PIC code generation.
9753 So, there's no way to reference constant data which is in the
9754 $TEXT$ space during PIC generation. Instead, place all constant
9755 data into the $PRIVATE$ subspace (this reduces sharing, but it
9756 works correctly). */
9757 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9759 /* We must not have a reference to an external symbol defined in a
9760 shared library in a readonly section, else the SOM linker will
9761 complain.
9763 So, we force exception information into the data section. */
9764 exception_section = data_section;
9767 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9769 static section *
9770 pa_som_tm_clone_table_section (void)
9772 return som_tm_clone_table_section;
9775 /* On hpux10, the linker will give an error if we have a reference
9776 in the read-only data section to a symbol defined in a shared
9777 library. Therefore, expressions that might require a reloc
9778 cannot be placed in the read-only data section. */
9780 static section *
9781 pa_select_section (tree exp, int reloc,
9782 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9784 if (TREE_CODE (exp) == VAR_DECL
9785 && TREE_READONLY (exp)
9786 && !TREE_THIS_VOLATILE (exp)
9787 && DECL_INITIAL (exp)
9788 && (DECL_INITIAL (exp) == error_mark_node
9789 || TREE_CONSTANT (DECL_INITIAL (exp)))
9790 && !reloc)
9792 if (TARGET_SOM
9793 && DECL_ONE_ONLY (exp)
9794 && !DECL_WEAK (exp))
9795 return som_one_only_readonly_data_section;
9796 else
9797 return readonly_data_section;
9799 else if (CONSTANT_CLASS_P (exp) && !reloc)
9800 return readonly_data_section;
9801 else if (TARGET_SOM
9802 && TREE_CODE (exp) == VAR_DECL
9803 && DECL_ONE_ONLY (exp)
9804 && !DECL_WEAK (exp))
9805 return som_one_only_data_section;
9806 else
9807 return data_section;
9808 }
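/* For example (illustrative): a readonly, non-volatile

     static const int answer = 42;

   needs no reloc and is placed in readonly_data_section (or the
   one-only readonly section for a SOM one-only, non-weak decl); a
   string constant takes the CONSTANT_CLASS_P path; and a decl whose
   initializer needs relocation falls through to data_section.  */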
9810 static void
9811 pa_globalize_label (FILE *stream, const char *name)
9813 /* We only handle DATA objects here; functions are globalized in
9814 ASM_DECLARE_FUNCTION_NAME. */
9815 if (! FUNCTION_NAME_P (name))
9817 fputs ("\t.EXPORT ", stream);
9818 assemble_name (stream, name);
9819 fputs (",DATA\n", stream);
9823 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9825 static rtx
9826 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9827 int incoming ATTRIBUTE_UNUSED)
9829 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9832 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9834 bool
9835 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9837 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9838 PA64 ABI says that objects larger than 128 bits are returned in memory.
9839 Note, int_size_in_bytes can return -1 if the size of the object is
9840 variable or larger than the maximum value that can be expressed as
9841 a HOST_WIDE_INT. It can also return zero for an empty type. The
9842 simplest way to handle variable and empty types is to pass them in
9843 memory. This avoids problems in defining the boundaries of argument
9844 slots, allocating registers, etc. */
9845 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9846 || int_size_in_bytes (type) <= 0);
9847 }
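/* Examples (illustrative): a 12-byte struct is returned in memory on
   the 32-bit target (12 > 8) but in registers under the PA64 ABI
   (12 <= 16).  An empty struct (size 0) and a variable-sized type
   (size -1) both satisfy the <= 0 test and are returned in memory on
   either target.  */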
9849 /* Structure to hold declaration and name of external symbols that are
9850 emitted by GCC. We generate a vector of these symbols and output them
9851 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9852 This avoids putting out names that are never really used. */
9854 typedef struct GTY(()) extern_symbol
9856 tree decl;
9857 const char *name;
9858 } extern_symbol;
9860 /* Define gc'd vector type for extern_symbol. */
9862 /* Vector of extern_symbol records. */
9863 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9865 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9866 /* Mark DECL (name NAME) as an external reference (assembler output
9867 file FILE). This saves the names to output at the end of the file
9868 if actually referenced. */
9870 void
9871 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9873 gcc_assert (file == asm_out_file);
9874 extern_symbol p = {decl, name};
9875 vec_safe_push (extern_symbols, p);
9878 /* Output text required at the end of an assembler file.
9879 This includes deferred plabels and .import directives for
9880 all external symbols that were actually referenced. */
9882 static void
9883 pa_hpux_file_end (void)
9885 unsigned int i;
9886 extern_symbol *p;
9888 if (!NO_DEFERRED_PROFILE_COUNTERS)
9889 output_deferred_profile_counters ();
9891 output_deferred_plabels ();
9893 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9895 tree decl = p->decl;
9897 if (!TREE_ASM_WRITTEN (decl)
9898 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9899 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9902 vec_free (extern_symbols);
9904 #endif
9906 /* Return true if a change from mode FROM to mode TO for a register
9907 in register class RCLASS is invalid. */
9909 bool
9910 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9911 enum reg_class rclass)
9913 if (from == to)
9914 return false;
9916 /* Reject changes to/from complex and vector modes. */
9917 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9918 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9919 return true;
9921 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9922 return false;
9924 /* There is no way to load QImode or HImode values directly from
9925 memory. SImode loads to the FP registers are not zero extended.
9926 On the 64-bit target, this conflicts with the definition of
9927 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9928 with different sizes in the floating-point registers. */
9929 if (MAYBE_FP_REG_CLASS_P (rclass))
9930 return true;
9932 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9933 in specific sets of registers. Thus, we cannot allow changing
9934 to a larger mode when it's larger than a word. */
9935 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9936 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9937 return true;
9939 return false;
9940 }
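/* For instance (illustrative, 32-bit target): changing SImode to
   DImode in an FP register class is rejected because the sizes
   differ; SImode to SFmode is allowed since the sizes match; and
   SImode to DImode in GENERAL_REGS is rejected by the final test
   because DImode is both wider than a word and wider than the
   original mode.  */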
9942 /* Returns TRUE if it is a good idea to tie two pseudo registers
9943 when one has mode MODE1 and one has mode MODE2.
9944 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9945 for any hard reg, then this must be FALSE for correct output.
9947 We should return FALSE for QImode and HImode because these modes
9948 are not ok in the floating-point registers. However, this prevents
9949 tying these modes to SImode and DImode in the general registers.
9950 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9951 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9952 in the floating-point registers. */
9954 bool
9955 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9957 /* Don't tie modes in different classes. */
9958 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9959 return false;
9961 return true;
9965 /* Length in units of the trampoline instruction code. */
9967 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9970 /* Output assembler code for a block containing the constant parts
9971 of a trampoline, leaving space for the variable parts.
9973 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9974 and then branches to the specified routine.
9976 This code template is copied from the text segment to a stack
9977 location, patched by pa_trampoline_init to contain valid values,
9978 and then entered as a subroutine.
9980 It is best to keep this as small as possible to avoid having to
9981 flush multiple lines in the cache. */
9983 static void
9984 pa_asm_trampoline_template (FILE *f)
9986 if (!TARGET_64BIT)
9988 fputs ("\tldw 36(%r22),%r21\n", f);
9989 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
9990 if (ASSEMBLER_DIALECT == 0)
9991 fputs ("\tdepi 0,31,2,%r21\n", f);
9992 else
9993 fputs ("\tdepwi 0,31,2,%r21\n", f);
9994 fputs ("\tldw 4(%r21),%r19\n", f);
9995 fputs ("\tldw 0(%r21),%r21\n", f);
9996 if (TARGET_PA_20)
9998 fputs ("\tbve (%r21)\n", f);
9999 fputs ("\tldw 40(%r22),%r29\n", f);
10000 fputs ("\t.word 0\n", f);
10001 fputs ("\t.word 0\n", f);
10003 else
10005 fputs ("\tldsid (%r21),%r1\n", f);
10006 fputs ("\tmtsp %r1,%sr0\n", f);
10007 fputs ("\tbe 0(%sr0,%r21)\n", f);
10008 fputs ("\tldw 40(%r22),%r29\n", f);
10010 fputs ("\t.word 0\n", f);
10011 fputs ("\t.word 0\n", f);
10012 fputs ("\t.word 0\n", f);
10013 fputs ("\t.word 0\n", f);
10015 else
10017 fputs ("\t.dword 0\n", f);
10018 fputs ("\t.dword 0\n", f);
10019 fputs ("\t.dword 0\n", f);
10020 fputs ("\t.dword 0\n", f);
10021 fputs ("\tmfia %r31\n", f);
10022 fputs ("\tldd 24(%r31),%r1\n", f);
10023 fputs ("\tldd 24(%r1),%r27\n", f);
10024 fputs ("\tldd 16(%r1),%r1\n", f);
10025 fputs ("\tbve (%r1)\n", f);
10026 fputs ("\tldd 32(%r31),%r31\n", f);
10027 fputs ("\t.dword 0 ; fptr\n", f);
10028 fputs ("\t.dword 0 ; static link\n", f);
10032 /* Emit RTL insns to initialize the variable parts of a trampoline.
10033 FNADDR is an RTX for the address of the function's pure code.
10034 CXT is an RTX for the static chain value for the function.
10036 Move the function address to the trampoline template at offset 36.
10037 Move the static chain value to the trampoline template at offset 40.
10038 Move the trampoline address to the trampoline template at offset 44.
10039 Move r19 to trampoline template at offset 48. The latter two
10040 words create a plabel for the indirect call to the trampoline.
10042 A similar sequence is used for the 64-bit port but the plabel is
10043 at the beginning of the trampoline.
10045 Finally, the cache entries for the trampoline code are flushed.
10046 This is necessary to ensure that the trampoline instruction sequence
10047 is written to memory prior to any attempts at prefetching the code
10048 sequence. */
10050 static void
10051 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10053 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10054 rtx start_addr = gen_reg_rtx (Pmode);
10055 rtx end_addr = gen_reg_rtx (Pmode);
10056 rtx line_length = gen_reg_rtx (Pmode);
10057 rtx r_tramp, tmp;
10059 emit_block_move (m_tramp, assemble_trampoline_template (),
10060 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10061 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10063 if (!TARGET_64BIT)
10065 tmp = adjust_address (m_tramp, Pmode, 36);
10066 emit_move_insn (tmp, fnaddr);
10067 tmp = adjust_address (m_tramp, Pmode, 40);
10068 emit_move_insn (tmp, chain_value);
10070 /* Create a fat pointer for the trampoline. */
10071 tmp = adjust_address (m_tramp, Pmode, 44);
10072 emit_move_insn (tmp, r_tramp);
10073 tmp = adjust_address (m_tramp, Pmode, 48);
10074 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10076 /* fdc and fic only use registers for the address to flush;
10077 they do not accept integer displacements. We align the
10078 start and end addresses to the beginning of their respective
10079 cache lines to minimize the number of lines flushed. */
10080 emit_insn (gen_andsi3 (start_addr, r_tramp,
10081 GEN_INT (-MIN_CACHELINE_SIZE)));
10082 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10083 TRAMPOLINE_CODE_SIZE-1));
10084 emit_insn (gen_andsi3 (end_addr, tmp,
10085 GEN_INT (-MIN_CACHELINE_SIZE)));
10086 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10087 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10088 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10089 gen_reg_rtx (Pmode),
10090 gen_reg_rtx (Pmode)));
10092 else
10094 tmp = adjust_address (m_tramp, Pmode, 56);
10095 emit_move_insn (tmp, fnaddr);
10096 tmp = adjust_address (m_tramp, Pmode, 64);
10097 emit_move_insn (tmp, chain_value);
10099 /* Create a fat pointer for the trampoline. */
10100 tmp = adjust_address (m_tramp, Pmode, 16);
10101 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10102 r_tramp, 32)));
10103 tmp = adjust_address (m_tramp, Pmode, 24);
10104 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10106 /* fdc and fic only use registers for the address to flush;
10107 they do not accept integer displacements. We align the
10108 start and end addresses to the beginning of their respective
10109 cache lines to minimize the number of lines flushed. */
10110 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10111 emit_insn (gen_anddi3 (start_addr, tmp,
10112 GEN_INT (-MIN_CACHELINE_SIZE)));
10113 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10114 TRAMPOLINE_CODE_SIZE - 1));
10115 emit_insn (gen_anddi3 (end_addr, tmp,
10116 GEN_INT (-MIN_CACHELINE_SIZE)));
10117 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10118 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10119 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10120 gen_reg_rtx (Pmode),
10121 gen_reg_rtx (Pmode)));
10124 #ifdef HAVE_ENABLE_EXECUTE_STACK
10125 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10126 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10127 #endif
10128 }
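/* Resulting 32-bit trampoline layout (illustrative summary of the
   offsets used above):

     bytes  0-35  instruction template from pa_asm_trampoline_template
     byte  36     address of the target function
     byte  40     static chain value
     byte  44     address of the trampoline itself (plabel word 1)
     byte  48     global pointer %r19 (plabel word 2)

   The data and instruction caches are then flushed over the aligned
   start/end range so the patched code is coherent before the
   trampoline is first called.  */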
10130 /* Perform any machine-specific adjustment in the address of the trampoline.
10131 ADDR contains the address that was passed to pa_trampoline_init.
10132 Adjust the trampoline address to point to the plabel at offset 44;
10133 the extra 2 in the offset marks the address as a plabel pointer. */
10134 static rtx
10135 pa_trampoline_adjust_address (rtx addr)
10137 if (!TARGET_64BIT)
10138 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10139 return addr;
10142 static rtx
10143 pa_delegitimize_address (rtx orig_x)
10145 rtx x = delegitimize_mem_from_attrs (orig_x);
10147 if (GET_CODE (x) == LO_SUM
10148 && GET_CODE (XEXP (x, 1)) == UNSPEC
10149 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10150 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10151 return x;
10154 static rtx
10155 pa_internal_arg_pointer (void)
10157 /* The argument pointer and the hard frame pointer are the same in
10158 the 32-bit runtime, so we don't need a copy. */
10159 if (TARGET_64BIT)
10160 return copy_to_reg (virtual_incoming_args_rtx);
10161 else
10162 return virtual_incoming_args_rtx;
10165 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10166 Frame pointer elimination is automatically handled. */
10168 static bool
10169 pa_can_eliminate (const int from, const int to)
10171 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10172 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10173 return false;
10175 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10176 ? ! frame_pointer_needed
10177 : true);
10180 /* Define the offset between two registers, FROM to be eliminated and its
10181 replacement TO, at the start of a routine. */
10182 HOST_WIDE_INT
10183 pa_initial_elimination_offset (int from, int to)
10185 HOST_WIDE_INT offset;
10187 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10188 && to == STACK_POINTER_REGNUM)
10189 offset = -pa_compute_frame_size (get_frame_size (), 0);
10190 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10191 offset = 0;
10192 else
10193 gcc_unreachable ();
10195 return offset;
10198 static void
10199 pa_conditional_register_usage (void)
10201 int i;
10203 if (!TARGET_64BIT && !TARGET_PA_11)
10205 for (i = 56; i <= FP_REG_LAST; i++)
10206 fixed_regs[i] = call_used_regs[i] = 1;
10207 for (i = 33; i < 56; i += 2)
10208 fixed_regs[i] = call_used_regs[i] = 1;
10210 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10212 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10213 fixed_regs[i] = call_used_regs[i] = 1;
10215 if (flag_pic)
10216 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10219 /* Target hook for c_mode_for_suffix. */
10221 static enum machine_mode
10222 pa_c_mode_for_suffix (char suffix)
10224 if (HPUX_LONG_DOUBLE_LIBRARY)
10226 if (suffix == 'q')
10227 return TFmode;
10230 return VOIDmode;
10233 /* Target hook for function_section. */
10235 static section *
10236 pa_function_section (tree decl, enum node_frequency freq,
10237 bool startup, bool exit)
10239 /* Put functions in text section if target doesn't have named sections. */
10240 if (!targetm_common.have_named_sections)
10241 return text_section;
10243 /* Force nested functions into the same section as the containing
10244 function. */
10245 if (decl
10246 && DECL_SECTION_NAME (decl) == NULL
10247 && DECL_CONTEXT (decl) != NULL_TREE
10248 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10249 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10250 return function_section (DECL_CONTEXT (decl));
10252 /* Otherwise, use the default function section. */
10253 return default_function_section (decl, freq, startup, exit);
10256 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10258 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10259 that need more than three instructions to load prior to reload. This
10260 limit is somewhat arbitrary. It takes three instructions to load a
10261 CONST_INT from memory but two are memory accesses. It may be better
10262 to increase the allowed range for CONST_INTS. We may also be able
10263 to handle CONST_DOUBLES. */
10265 static bool
10266 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10268 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10269 return false;
10271 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10272 return false;
10274 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10275 legitimate constants. The other variants can't be handled by
10276 the move patterns after reload starts. */
10277 if (tls_referenced_p (x))
10278 return false;
10280 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10281 return false;
10283 if (TARGET_64BIT
10284 && HOST_BITS_PER_WIDE_INT > 32
10285 && GET_CODE (x) == CONST_INT
10286 && !reload_in_progress
10287 && !reload_completed
10288 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10289 && !pa_cint_ok_for_move (INTVAL (x)))
10290 return false;
10292 if (function_label_operand (x, mode))
10293 return false;
10295 return true;
10296 }
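/* For example (illustrative): on the 64-bit target a CONST_DOUBLE is
   never legitimate here, and a wide CONST_INT that satisfies neither
   LEGITIMATE_64BIT_CONST_INT_P nor pa_cint_ok_for_move is rejected
   before reload, forcing the value to memory rather than a long
   load sequence.  */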
10298 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10300 static unsigned int
10301 pa_section_type_flags (tree decl, const char *name, int reloc)
10303 unsigned int flags;
10305 flags = default_section_type_flags (decl, name, reloc);
10307 /* Function labels are placed in the constant pool. This can
10308 cause a section conflict if decls are put in ".data.rel.ro"
10309 or ".data.rel.ro.local" using the __attribute__ construct. */
10310 if (strcmp (name, ".data.rel.ro") == 0
10311 || strcmp (name, ".data.rel.ro.local") == 0)
10312 flags |= SECTION_WRITE | SECTION_RELRO;
10314 return flags;
10317 /* pa_legitimate_address_p recognizes an RTL expression that is a
10318 valid memory address for an instruction. The MODE argument is the
10319 machine mode for the MEM expression that wants to use this address.
10321 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10322 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10323 available with floating point loads and stores, and integer loads.
10324 We get better code by allowing indexed addresses in the initial
10325 RTL generation.
10327 The acceptance of indexed addresses as legitimate implies that we
10328 must provide patterns for doing indexed integer stores, or the move
10329 expanders must force the address of an indexed store to a register.
10330 We have adopted the latter approach.
10332 Another function of pa_legitimate_address_p is to ensure that
10333 the base register is a valid pointer for indexed instructions.
10334 On targets that have non-equivalent space registers, we have to
10335 know at the time of assembler output which register in a REG+REG
10336 pair is the base register. The REG_POINTER flag is sometimes lost
10337 in reload and the following passes, so it can't be relied on during
10338 code generation. Thus, we either have to canonicalize the order
10339 of the registers in REG+REG indexed addresses, or treat REG+REG
10340 addresses separately and provide patterns for both permutations.
10342 The latter approach requires several hundred additional lines of
10343 code in pa.md. The downside to canonicalizing is that a PLUS
10344 in the wrong order can't combine to form a scaled indexed
10345 memory operand. As we won't need to canonicalize the operands if
10346 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10348 We initially break out scaled indexed addresses in canonical order
10349 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10350 scaled indexed addresses during RTL generation. However, fold_rtx
10351 has its own opinion on how the operands of a PLUS should be ordered.
10352 If one of the operands is equivalent to a constant, it will make
10353 that operand the second operand. As the base register is likely to
10354 be equivalent to a SYMBOL_REF, we have made it the second operand.
10356 pa_legitimate_address_p accepts REG+REG as legitimate when the
10357 operands are in the order INDEX+BASE on targets with non-equivalent
10358 space registers, and in any order on targets with equivalent space
10359 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10361 We treat a SYMBOL_REF as legitimate if it is part of the current
10362 function's constant-pool, because such addresses can actually be
10363 output as REG+SMALLINT. */
10365 static bool
10366 pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
10368 if ((REG_P (x)
10369 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10370 : REG_OK_FOR_BASE_P (x)))
10371 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10372 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10373 && REG_P (XEXP (x, 0))
10374 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10375 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10376 return true;
10378 if (GET_CODE (x) == PLUS)
10380 rtx base, index;
10382 /* For REG+REG, the base register should be in XEXP (x, 1),
10383 so check it first. */
10384 if (REG_P (XEXP (x, 1))
10385 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10386 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10387 base = XEXP (x, 1), index = XEXP (x, 0);
10388 else if (REG_P (XEXP (x, 0))
10389 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10390 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10391 base = XEXP (x, 0), index = XEXP (x, 1);
10392 else
10393 return false;
10395 if (GET_CODE (index) == CONST_INT)
10397 if (INT_5_BITS (index))
10398 return true;
10400 /* When INT14_OK_STRICT is false, a secondary reload is needed
10401 to adjust the displacement of SImode and DImode floating point
10402 instructions but this may fail when the register also needs
10403 reloading. So, we return false when STRICT is true. We
10404 also reject long displacements for float mode addresses since
10405 the majority of accesses will use floating point instructions
10406 that don't support 14-bit offsets. */
10407 if (!INT14_OK_STRICT
10408 && (strict || !(reload_in_progress || reload_completed))
10409 && mode != QImode
10410 && mode != HImode)
10411 return false;
10413 return base14_operand (index, mode);
10416 if (!TARGET_DISABLE_INDEXING
10417 /* Only accept the "canonical" INDEX+BASE operand order
10418 on targets with non-equivalent space registers. */
10419 && (TARGET_NO_SPACE_REGS
10420 ? REG_P (index)
10421 : (base == XEXP (x, 1) && REG_P (index)
10422 && (reload_completed
10423 || (reload_in_progress && HARD_REGISTER_P (base))
10424 || REG_POINTER (base))
10425 && (reload_completed
10426 || (reload_in_progress && HARD_REGISTER_P (index))
10427 || !REG_POINTER (index))))
10428 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10429 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10430 : REG_OK_FOR_INDEX_P (index))
10431 && borx_reg_operand (base, Pmode)
10432 && borx_reg_operand (index, Pmode))
10433 return true;
10435 if (!TARGET_DISABLE_INDEXING
10436 && GET_CODE (index) == MULT
10437 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10438 && REG_P (XEXP (index, 0))
10439 && GET_MODE (XEXP (index, 0)) == Pmode
10440 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10441 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10442 && GET_CODE (XEXP (index, 1)) == CONST_INT
10443 && INTVAL (XEXP (index, 1))
10444 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10445 && borx_reg_operand (base, Pmode))
10446 return true;
10448 return false;
10451 if (GET_CODE (x) == LO_SUM)
10453 rtx y = XEXP (x, 0);
10455 if (GET_CODE (y) == SUBREG)
10456 y = SUBREG_REG (y);
10458 if (REG_P (y)
10459 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10460 : REG_OK_FOR_BASE_P (y)))
10462 /* Needed for -fPIC */
10463 if (mode == Pmode
10464 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10465 return true;
10467 if (!INT14_OK_STRICT
10468 && (strict || !(reload_in_progress || reload_completed))
10469 && mode != QImode
10470 && mode != HImode)
10471 return false;
10473 if (CONSTANT_P (XEXP (x, 1)))
10474 return true;
10476 return false;
10479 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10480 return true;
10482 return false;
10483 }
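/* Examples (illustrative): (reg) and (post_inc (reg)) are accepted
   outright; (plus (reg) (const_int 12)) is accepted because 12 fits
   in 5 bits; and (plus (mult (reg) (const_int 4)) (reg)) is a scaled
   SImode index, accepted when indexing is enabled and the scale
   matches GET_MODE_SIZE of the access.  */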
10485 /* Look for machine dependent ways to make the invalid address AD a
10486 valid address.
10488 For the PA, transform:
10490 memory(X + <large int>)
10492 into:
10494 if (<large int> & mask) >= (mask + 1) / 2
10495 Y = (<large int> & ~mask) + mask + 1 Round up.
10496 else
10497 Y = (<large int> & ~mask) Round down.
10498 Z = X + Y
10499 memory (Z + (<large int> - Y));
10501 This makes reload inheritance and reload_cse work better since Z
10502 can be reused.
10504 There may be more opportunities to improve code with this hook. */
10506 rtx
10507 pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
10508 int opnum, int type,
10509 int ind_levels ATTRIBUTE_UNUSED)
10511 long offset, newoffset, mask;
10512 rtx new_rtx, temp = NULL_RTX;
10514 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10515 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10517 if (optimize && GET_CODE (ad) == PLUS)
10518 temp = simplify_binary_operation (PLUS, Pmode,
10519 XEXP (ad, 0), XEXP (ad, 1));
10521 new_rtx = temp ? temp : ad;
10523 if (optimize
10524 && GET_CODE (new_rtx) == PLUS
10525 && GET_CODE (XEXP (new_rtx, 0)) == REG
10526 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10528 offset = INTVAL (XEXP ((new_rtx), 1));
10530 /* Choose rounding direction. Round up if we are >= halfway. */
10531 if ((offset & mask) >= ((mask + 1) / 2))
10532 newoffset = (offset & ~mask) + mask + 1;
10533 else
10534 newoffset = offset & ~mask;
10536 /* Ensure that long displacements are aligned. */
10537 if (mask == 0x3fff
10538 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10539 || (TARGET_64BIT && (mode) == DImode)))
10540 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10542 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10544 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10545 GEN_INT (newoffset));
10546 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10547 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10548 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10549 opnum, (enum reload_type) type);
10550 return ad;
10554 return NULL_RTX;
10555 }
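/* Worked example (illustrative): for memory (X + 0x5432) with the
   14-bit mask 0x3fff, offset & mask is 0x1432, below
   (mask + 1) / 2 == 0x2000, so we round down and newoffset is
   0x5432 & ~0x3fff == 0x4000.  The address is rebuilt as
   (X + 0x4000) + 0x1432; the inner PLUS is reloaded into a register
   Z, and the 14-bit residual 0x1432 lets later accesses near the
   same base reuse Z via reload inheritance.  */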
10557 #include "gt-pa.h"