/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "df.h"
#include "opts.h"
#include "builtins.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
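/* Note the size check above: the bypass is only claimed when the value
   computed by OUT_INSN and the value being stored have the same mode
   size, so e.g. a DFmode result feeding an SFmode store is rejected.  */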
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
                             rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
                                              machine_mode, int *,
                                              const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
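/* For example: 4660 (0x1234) fits in a signed 14-bit immediate, so ldo
   works; 0x12345800 has its low 11 bits clear, so ldil works; and 0x1e0
   is a shifted contiguous field that zdepi can deposit.  */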
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
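/* For example, 0x12345800 qualifies: bits 0-10 are zero and bit 31 is
   clear, so the value is unchanged by 32->64 bit sign extension.  A
   64-bit value of 0x80000000 fails, since its sign-extended 32-bit
   form would be 0xffffffff80000000.  */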
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
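/* Worked example: for x = 0x7f0 (seven ones starting at bit 4),
   lsb_mask = 0x10 and ((x >> 4) + lsb_mask) & ~(lsb_mask - 1) = 0x80,
   a power of two, so the constant is accepted; it is the 5-bit value
   -1 sign extended to a 7-bit field and shifted into place.  For
   x = 0x505 the result is 0x51, not a power of two, so it is
   rejected.  */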
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
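/* Equivalently: MASK is accepted iff its complement is a single
   contiguous run of ones.  E.g., for mask = ~(unsigned HOST_WIDE_INT)
   0x1fc0 (clearing bits 6-12), ~mask = 0x1fc0; adding its lowest set
   bit 0x40 yields 0x2000, a power of two, so the test succeeds.  */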
/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
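/* Here MASK itself must be a contiguous run of ones.  E.g.,
   0x00ff0000 + 0x00010000 = 0x01000000, a power of two, so it is
   accepted; a run that reaches the most significant bit wraps the
   sum to zero, which also passes.  */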
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= (mask + 1) / 2
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
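/* Worked example: with the MODE_INT mask 0x3fff, memory (X + 70000)
   has 70000 & 0x3fff = 4464, below the halfway point of 8192, so
   Y = 65536.  We compute Z = X + 65536 and emit memory (Z + 4464);
   4464 fits in a 14-bit displacement, and CSE can reuse Z for a
   neighboring reference such as memory (X + 70004).  */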
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
             (plus (mult (reg) (shadd_const))
                   (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
/* For the HPPA, REG, REG+CONST and LO_SUM addresses cost 1, a bare
   HIGH costs 2, and anything else costs 4.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}
1558 /* Emit insns to move operands[1] into operands[0].
1560 Return 1 if we have written out everything that needs to be done to
1561 do the move. Otherwise, return 0 and the caller will emit the move
1562 normally.
1564 Note SCRATCH_REG may not be in the proper mode depending on how it
1565 will be used. This routine is responsible for creating a new copy
1566 of SCRATCH_REG in the proper mode. */
1569 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1571 register rtx operand0 = operands[0];
1572 register rtx operand1 = operands[1];
1573 register rtx tem;
1575 /* We can only handle indexed addresses in the destination operand
1576 of floating point stores. Thus, we need to break out indexed
1577 addresses from the destination operand. */
1578 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1580 gcc_assert (can_create_pseudo_p ());
1582 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1583 operand0 = replace_equiv_address (operand0, tem);
1586 /* On targets with non-equivalent space registers, break out unscaled
1587 indexed addresses from the source operand before the final CSE.
1588 We have to do this because the REG_POINTER flag is not correctly
1589 carried through various optimization passes and CSE may substitute
1590 a pseudo without the pointer set for one with the pointer set. As
1591 a result, we loose various opportunities to create insns with
1592 unscaled indexed addresses. */
1593 if (!TARGET_NO_SPACE_REGS
1594 && !cse_not_expected
1595 && GET_CODE (operand1) == MEM
1596 && GET_CODE (XEXP (operand1, 0)) == PLUS
1597 && REG_P (XEXP (XEXP (operand1, 0), 0))
1598 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1599 operand1
1600 = replace_equiv_address (operand1,
1601 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1603 if (scratch_reg
1604 && reload_in_progress && GET_CODE (operand0) == REG
1605 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1606 operand0 = reg_equiv_mem (REGNO (operand0));
1607 else if (scratch_reg
1608 && reload_in_progress && GET_CODE (operand0) == SUBREG
1609 && GET_CODE (SUBREG_REG (operand0)) == REG
1610 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1612 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1613 the code which tracks sets/uses for delete_output_reload. */
1614 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1615 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1616 SUBREG_BYTE (operand0));
1617 operand0 = alter_subreg (&temp, true);
1620 if (scratch_reg
1621 && reload_in_progress && GET_CODE (operand1) == REG
1622 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1623 operand1 = reg_equiv_mem (REGNO (operand1));
1624 else if (scratch_reg
1625 && reload_in_progress && GET_CODE (operand1) == SUBREG
1626 && GET_CODE (SUBREG_REG (operand1)) == REG
1627 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1629 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1630 the code which tracks sets/uses for delete_output_reload. */
1631 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1632 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1633 SUBREG_BYTE (operand1));
1634 operand1 = alter_subreg (&temp, true);
1637 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1638 && ((tem = find_replacement (&XEXP (operand0, 0)))
1639 != XEXP (operand0, 0)))
1640 operand0 = replace_equiv_address (operand0, tem);
1642 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1643 && ((tem = find_replacement (&XEXP (operand1, 0)))
1644 != XEXP (operand1, 0)))
1645 operand1 = replace_equiv_address (operand1, tem);
1647 /* Handle secondary reloads for loads/stores of FP registers from
1648 REG+D addresses where D does not fit in 5 or 14 bits, including
1649 (subreg (mem (addr))) cases. */
1650 if (scratch_reg
1651 && fp_reg_operand (operand0, mode)
1652 && (MEM_P (operand1)
1653 || (GET_CODE (operand1) == SUBREG
1654 && MEM_P (XEXP (operand1, 0))))
1655 && !floating_point_store_memory_operand (operand1, mode))
1657 if (GET_CODE (operand1) == SUBREG)
1658 operand1 = XEXP (operand1, 0);
1660 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1661 it in WORD_MODE regardless of what mode it was originally given
1662 to us. */
1663 scratch_reg = force_mode (word_mode, scratch_reg);
1665 /* D might not fit in 14 bits either; for such cases load D into
1666 scratch reg. */
1667 if (reg_plus_base_memory_operand (operand1, mode)
1668 && !(TARGET_PA_20
1669 && !TARGET_ELF32
1670 && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
1672 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1673 emit_move_insn (scratch_reg,
1674 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1675 Pmode,
1676 XEXP (XEXP (operand1, 0), 0),
1677 scratch_reg));
1679 else
1680 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1681 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1682 replace_equiv_address (operand1, scratch_reg)));
1683 return 1;
1685 else if (scratch_reg
1686 && fp_reg_operand (operand1, mode)
1687 && (MEM_P (operand0)
1688 || (GET_CODE (operand0) == SUBREG
1689 && MEM_P (XEXP (operand0, 0))))
1690 && !floating_point_store_memory_operand (operand0, mode))
1692 if (GET_CODE (operand0) == SUBREG)
1693 operand0 = XEXP (operand0, 0);
1695 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1696 it in WORD_MODE regardless of what mode it was originally given
1697 to us. */
1698 scratch_reg = force_mode (word_mode, scratch_reg);
1700 /* D might not fit in 14 bits either; for such cases load D into
1701 scratch reg. */
1702 if (reg_plus_base_memory_operand (operand0, mode)
1703 && !(TARGET_PA_20
1704 && !TARGET_ELF32
1705 && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
1707 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1708 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1709 0)),
1710 Pmode,
1711 XEXP (XEXP (operand0, 0),
1713 scratch_reg));
1715 else
1716 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1717 emit_insn (gen_rtx_SET (VOIDmode,
1718 replace_equiv_address (operand0, scratch_reg),
1719 operand1));
1720 return 1;
1722 /* Handle secondary reloads for loads of FP registers from constant
1723 expressions by forcing the constant into memory. For the most part,
1724 this is only necessary for SImode and DImode.
1726 Use scratch_reg to hold the address of the memory location. */
1727 else if (scratch_reg
1728 && CONSTANT_P (operand1)
1729 && fp_reg_operand (operand0, mode))
1731 rtx const_mem, xoperands[2];
1733 if (operand1 == CONST0_RTX (mode))
1735 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1736 return 1;
1739 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1740 it in WORD_MODE regardless of what mode it was originally given
1741 to us. */
1742 scratch_reg = force_mode (word_mode, scratch_reg);
1744 /* Force the constant into memory and put the address of the
1745 memory location into scratch_reg. */
1746 const_mem = force_const_mem (mode, operand1);
1747 xoperands[0] = scratch_reg;
1748 xoperands[1] = XEXP (const_mem, 0);
1749 pa_emit_move_sequence (xoperands, Pmode, 0);
1751 /* Now load the destination register. */
1752 emit_insn (gen_rtx_SET (mode, operand0,
1753 replace_equiv_address (const_mem, scratch_reg)));
1754 return 1;
1756 /* Handle secondary reloads for SAR. These occur when trying to load
1757 the SAR from memory or a constant. */
1758 else if (scratch_reg
1759 && GET_CODE (operand0) == REG
1760 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1761 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1762 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1764 /* D might not fit in 14 bits either; for such cases load D into
1765 scratch reg. */
1766 if (GET_CODE (operand1) == MEM
1767 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1769 /* We are reloading the address into the scratch register, so we
1770 want to make sure the scratch register is a full register. */
1771 scratch_reg = force_mode (word_mode, scratch_reg);
1773 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1774 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1775 0)),
1776 Pmode,
1777 XEXP (XEXP (operand1, 0),
1779 scratch_reg));
1781 /* Now we are going to load the scratch register from memory,
1782 we want to load it in the same width as the original MEM,
1783 which must be the same as the width of the ultimate destination,
1784 OPERAND0. */
1785 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1787 emit_move_insn (scratch_reg,
1788 replace_equiv_address (operand1, scratch_reg));
1790 else
1792 /* We want to load the scratch register using the same mode as
1793 the ultimate destination. */
1794 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1796 emit_move_insn (scratch_reg, operand1);
1799 /* And emit the insn to set the ultimate destination. We know that
1800 the scratch register has the same mode as the destination at this
1801 point. */
1802 emit_move_insn (operand0, scratch_reg);
1803 return 1;
1805 /* Handle the most common case: storing into a register. */
1806 else if (register_operand (operand0, mode))
1808 /* Legitimize TLS symbol references. This happens for references
1809 that aren't a legitimate constant. */
1810 if (PA_SYMBOL_REF_TLS_P (operand1))
1811 operand1 = legitimize_tls_address (operand1);
1813 if (register_operand (operand1, mode)
1814 || (GET_CODE (operand1) == CONST_INT
1815 && pa_cint_ok_for_move (INTVAL (operand1)))
1816 || (operand1 == CONST0_RTX (mode))
1817 || (GET_CODE (operand1) == HIGH
1818 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1819 /* Only `general_operands' can come here, so MEM is ok. */
1820 || GET_CODE (operand1) == MEM)
1822 /* Various sets are created during RTL generation which don't
1823 have the REG_POINTER flag correctly set. After the CSE pass,
1824 instruction recognition can fail if we don't consistently
1825 set this flag when performing register copies. This should
1826 also improve the opportunities for creating insns that use
1827 unscaled indexing. */
1828 if (REG_P (operand0) && REG_P (operand1))
1830 if (REG_POINTER (operand1)
1831 && !REG_POINTER (operand0)
1832 && !HARD_REGISTER_P (operand0))
1833 copy_reg_pointer (operand0, operand1);
1836 /* When MEMs are broken out, the REG_POINTER flag doesn't
1837 get set. In some cases, we can set the REG_POINTER flag
1838 from the declaration for the MEM. */
1839 if (REG_P (operand0)
1840 && GET_CODE (operand1) == MEM
1841 && !REG_POINTER (operand0))
1843 tree decl = MEM_EXPR (operand1);
1845 /* Set the register pointer flag and register alignment
1846 if the declaration for this memory reference is a
1847 pointer type. */
1848 if (decl)
1850 tree type;
1852 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1853 tree operand 1. */
1854 if (TREE_CODE (decl) == COMPONENT_REF)
1855 decl = TREE_OPERAND (decl, 1);
1857 type = TREE_TYPE (decl);
1858 type = strip_array_types (type);
1860 if (POINTER_TYPE_P (type))
1862 int align;
1864 type = TREE_TYPE (type);
1865 /* Using TYPE_ALIGN_OK is rather conservative as
1866 only the Ada front end actually sets it. */
1867 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1868 : BITS_PER_UNIT);
1869 mark_reg_pointer (operand0, align);
1874 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1875 return 1;
1878 else if (GET_CODE (operand0) == MEM)
1880 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1881 && !(reload_in_progress || reload_completed))
1883 rtx temp = gen_reg_rtx (DFmode);
1885 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1886 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1887 return 1;
1889 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1891 /* Run this case quickly. */
1892 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1893 return 1;
1895 if (! (reload_in_progress || reload_completed))
1897 operands[0] = validize_mem (operand0);
1898 operands[1] = operand1 = force_reg (mode, operand1);
1902 /* Simplify the source if we need to.
1903 Note we do have to handle function labels here, even though we do
1904 not consider them legitimate constants. Loop optimizations can
1905 call the emit_move_xxx routines with one as a source. */
1906 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1907 || (GET_CODE (operand1) == HIGH
1908 && symbolic_operand (XEXP (operand1, 0), mode))
1909 || function_label_operand (operand1, VOIDmode)
1910 || tls_referenced_p (operand1))
1912 int ishighonly = 0;
1914 if (GET_CODE (operand1) == HIGH)
1916 ishighonly = 1;
1917 operand1 = XEXP (operand1, 0);
1919 if (symbolic_operand (operand1, mode))
1921 /* Argh. The assembler and linker can't handle arithmetic
1922 involving plabels.
1924 So we force the plabel into memory, load operand0 from
1925 the memory location, then add in the constant part. */
1926 if ((GET_CODE (operand1) == CONST
1927 && GET_CODE (XEXP (operand1, 0)) == PLUS
1928 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1929 VOIDmode))
1930 || function_label_operand (operand1, VOIDmode))
1932 rtx temp, const_part;
1934 /* Figure out what (if any) scratch register to use. */
1935 if (reload_in_progress || reload_completed)
1937 scratch_reg = scratch_reg ? scratch_reg : operand0;
1938 /* SCRATCH_REG will hold an address and maybe the actual
1939 data. We want it in WORD_MODE regardless of what mode it
1940 was originally given to us. */
1941 scratch_reg = force_mode (word_mode, scratch_reg);
1943 else if (flag_pic)
1944 scratch_reg = gen_reg_rtx (Pmode);
1946 if (GET_CODE (operand1) == CONST)
1948 /* Save away the constant part of the expression. */
1949 const_part = XEXP (XEXP (operand1, 0), 1);
1950 gcc_assert (GET_CODE (const_part) == CONST_INT);
1952 /* Force the function label into memory. */
1953 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1955 else
1957 /* No constant part. */
1958 const_part = NULL_RTX;
1960 /* Force the function label into memory. */
1961 temp = force_const_mem (mode, operand1);
1965 /* Get the address of the memory location. PIC-ify it if
1966 necessary. */
1967 temp = XEXP (temp, 0);
1968 if (flag_pic)
1969 temp = legitimize_pic_address (temp, mode, scratch_reg);
1971 /* Put the address of the memory location into our destination
1972 register. */
1973 operands[1] = temp;
1974 pa_emit_move_sequence (operands, mode, scratch_reg);
1976 /* Now load from the memory location into our destination
1977 register. */
1978 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1979 pa_emit_move_sequence (operands, mode, scratch_reg);
1981 /* And add back in the constant part. */
1982 if (const_part != NULL_RTX)
1983 expand_inc (operand0, const_part);
1985 return 1;
1988 if (flag_pic)
1990 rtx temp;
1992 if (reload_in_progress || reload_completed)
1994 temp = scratch_reg ? scratch_reg : operand0;
1995 /* TEMP will hold an address and maybe the actual
1996 data. We want it in WORD_MODE regardless of what mode it
1997 was originally given to us. */
1998 temp = force_mode (word_mode, temp);
2000 else
2001 temp = gen_reg_rtx (Pmode);
2003 /* (const (plus (symbol) (const_int))) must be forced to
2004 memory during/after reload if the const_int will not fit
2005 in 14 bits. */
2006 if (GET_CODE (operand1) == CONST
2007 && GET_CODE (XEXP (operand1, 0)) == PLUS
2008 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2009 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2010 && (reload_completed || reload_in_progress)
2011 && flag_pic)
2013 rtx const_mem = force_const_mem (mode, operand1);
2014 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2015 mode, temp);
2016 operands[1] = replace_equiv_address (const_mem, operands[1]);
2017 pa_emit_move_sequence (operands, mode, temp);
2019 else
2021 operands[1] = legitimize_pic_address (operand1, mode, temp);
2022 if (REG_P (operand0) && REG_P (operands[1]))
2023 copy_reg_pointer (operand0, operands[1]);
2024 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2027 /* On the HPPA, references to data space are supposed to use dp,
2028 register 27, but showing it in the RTL inhibits various cse
2029 and loop optimizations. */
2030 else
2032 rtx temp, set;
2034 if (reload_in_progress || reload_completed)
2036 temp = scratch_reg ? scratch_reg : operand0;
2037 /* TEMP will hold an address and maybe the actual
2038 data. We want it in WORD_MODE regardless of what mode it
2039 was originally given to us. */
2040 temp = force_mode (word_mode, temp);
2042 else
2043 temp = gen_reg_rtx (mode);
2045 /* Loading a SYMBOL_REF into a register makes that register
2046 safe to be used as the base in an indexed address.
2048 Don't mark hard registers though. That loses. */
2049 if (GET_CODE (operand0) == REG
2050 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2051 mark_reg_pointer (operand0, BITS_PER_UNIT);
2052 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2053 mark_reg_pointer (temp, BITS_PER_UNIT);
2055 if (ishighonly)
2056 set = gen_rtx_SET (mode, operand0, temp);
2057 else
2058 set = gen_rtx_SET (VOIDmode,
2059 operand0,
2060 gen_rtx_LO_SUM (mode, temp, operand1));
2062 emit_insn (gen_rtx_SET (VOIDmode,
2063 temp,
2064 gen_rtx_HIGH (mode, operand1)));
2065 emit_insn (set);
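/* For illustration (symbol name hypothetical): in the non-PIC case
   the HIGH/LO_SUM pair built above typically assembles to

       ldil L'sym,%t
       ldo R'sym(%t),%dst

   the same two-insn split used for large constants elsewhere in
   this file.  */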
2068 return 1;
2070 else if (tls_referenced_p (operand1))
2072 rtx tmp = operand1;
2073 rtx addend = NULL;
2075 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2077 addend = XEXP (XEXP (tmp, 0), 1);
2078 tmp = XEXP (XEXP (tmp, 0), 0);
2081 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2082 tmp = legitimize_tls_address (tmp);
2083 if (addend)
2085 tmp = gen_rtx_PLUS (mode, tmp, addend);
2086 tmp = force_operand (tmp, operands[0]);
2088 operands[1] = tmp;
2090 else if (GET_CODE (operand1) != CONST_INT
2091 || !pa_cint_ok_for_move (INTVAL (operand1)))
2093 rtx insn, temp;
2094 rtx op1 = operand1;
2095 HOST_WIDE_INT value = 0;
2096 HOST_WIDE_INT insv = 0;
2097 int insert = 0;
2099 if (GET_CODE (operand1) == CONST_INT)
2100 value = INTVAL (operand1);
2102 if (TARGET_64BIT
2103 && GET_CODE (operand1) == CONST_INT
2104 && HOST_BITS_PER_WIDE_INT > 32
2105 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2107 HOST_WIDE_INT nval;
2109 /* Extract the low order 32 bits of the value and sign extend.
2110 If the new value is the same as the original value, we can
2111 use the original value as-is. If the new value is
2112 different, we use it and insert the most significant 32 bits
2113 of the original value into the final result. */
2114 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2115 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2116 if (value != nval)
2118 #if HOST_BITS_PER_WIDE_INT > 32
2119 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2120 #endif
2121 insert = 1;
2122 value = nval;
2123 operand1 = GEN_INT (nval);
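/* Worked illustration (value hypothetical): for VALUE == 0x123456789,
   NVAL sign-extends the low-order 32 bits to 0x23456789.  Since the
   two differ, INSV becomes the high-order word 0x1 and INSERT is set,
   so the code below loads 0x23456789 first and then deposits 0x1 into
   the upper 32 bits of the destination.  */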
2127 if (reload_in_progress || reload_completed)
2128 temp = scratch_reg ? scratch_reg : operand0;
2129 else
2130 temp = gen_reg_rtx (mode);
2132 /* We don't directly split DImode constants on 32-bit targets
2133 because PLUS uses an 11-bit immediate and the insn sequence
2134 generated is not as efficient as the one using HIGH/LO_SUM. */
2135 if (GET_CODE (operand1) == CONST_INT
2136 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2137 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2138 && !insert)
2140 /* Directly break constant into high and low parts. This
2141 provides better optimization opportunities because various
2142 passes recognize constants split with PLUS but not LO_SUM.
2143 We use a 14-bit signed low part except when the addition
2144 of 0x4000 to the high part might change the sign of the
2145 high part. */
2146 HOST_WIDE_INT low = value & 0x3fff;
2147 HOST_WIDE_INT high = value & ~ 0x3fff;
2149 if (low >= 0x2000)
2151 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2152 high += 0x2000;
2153 else
2154 high += 0x4000;
2157 low = value - high;
2159 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2160 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2162 else
2164 emit_insn (gen_rtx_SET (VOIDmode, temp,
2165 gen_rtx_HIGH (mode, operand1)));
2166 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
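/* Worked illustration of the PLUS split above (value hypothetical):
   for VALUE == 0x12347abc, low == 0x3abc >= 0x2000, so high becomes
   0x12348000 and low becomes -0x544, which still fits in 14 signed
   bits.  The pair typically assembles to something like
   "ldil L'0x12348000,%t" followed by "ldo -1348(%t),%dst".  */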
2169 insn = emit_move_insn (operands[0], operands[1]);
2171 /* Now insert the most significant 32 bits of the value
2172 into the register. When we don't have a second register
2173 available, it could take up to nine instructions to load
2174 a 64-bit integer constant. Prior to reload, we force
2175 constants that would take more than three instructions
2176 to load to the constant pool. During and after reload,
2177 we have to handle all possible values. */
2178 if (insert)
2180 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2181 register and the value to be inserted is outside the
2182 range that can be loaded with three depdi instructions. */
2183 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2185 operand1 = GEN_INT (insv);
2187 emit_insn (gen_rtx_SET (VOIDmode, temp,
2188 gen_rtx_HIGH (mode, operand1)));
2189 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2190 if (mode == DImode)
2191 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2192 const0_rtx, temp));
2193 else
2194 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2195 const0_rtx, temp));
2197 else
2199 int len = 5, pos = 27;
2201 /* Insert the bits using the depdi instruction. */
2202 while (pos >= 0)
2204 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2205 HOST_WIDE_INT sign = v5 < 0;
2207 /* Left extend the insertion. */
2208 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2209 while (pos > 0 && (insv & 1) == sign)
2211 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2212 len += 1;
2213 pos -= 1;
2216 if (mode == DImode)
2217 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2218 GEN_INT (pos), GEN_INT (v5)));
2219 else
2220 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2221 GEN_INT (pos), GEN_INT (v5)));
2223 len = pos > 0 && pos < 5 ? pos : 5;
2224 pos -= len;
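/* Illustration of the loop above (value hypothetical): for INSV == 5,
   v5 == 5 and the shifted INSV becomes zero, so the inner while loop
   widens the field until POS reaches 0 and a single deposit of length
   32 at position 0 covers the whole upper word.  Values whose
   remaining bits are not just sign copies need one deposit per 5-bit
   group.  */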
2229 set_unique_reg_note (insn, REG_EQUAL, op1);
2231 return 1;
2234 /* Now have insn-emit do whatever it normally does. */
2235 return 0;
2238 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2239 it will need a link/runtime reloc). */
2241 int
2242 pa_reloc_needed (tree exp)
2244 int reloc = 0;
2246 switch (TREE_CODE (exp))
2248 case ADDR_EXPR:
2249 return 1;
2251 case POINTER_PLUS_EXPR:
2252 case PLUS_EXPR:
2253 case MINUS_EXPR:
2254 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2255 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2256 break;
2258 CASE_CONVERT:
2259 case NON_LVALUE_EXPR:
2260 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2261 break;
2263 case CONSTRUCTOR:
2265 tree value;
2266 unsigned HOST_WIDE_INT ix;
2268 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2269 if (value)
2270 reloc |= pa_reloc_needed (value);
2272 break;
2274 case ERROR_MARK:
2275 break;
2277 default:
2278 break;
2280 return reloc;
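/* For example (names hypothetical), the static initializer
   "int *p = &x;" reaches here as an ADDR_EXPR and returns 1, as does
   "&x + 4" through the PLUS handling above, while a plain integer
   constant falls through to the default case and returns 0.  */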
2284 /* Return the best assembler insn template
2285 for moving operands[1] into operands[0] as a fullword. */
2286 const char *
2287 pa_singlemove_string (rtx *operands)
2289 HOST_WIDE_INT intval;
2291 if (GET_CODE (operands[0]) == MEM)
2292 return "stw %r1,%0";
2293 if (GET_CODE (operands[1]) == MEM)
2294 return "ldw %1,%0";
2295 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2297 long i;
2298 REAL_VALUE_TYPE d;
2300 gcc_assert (GET_MODE (operands[1]) == SFmode);
2302 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2303 bit pattern. */
2304 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2305 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2307 operands[1] = GEN_INT (i);
2308 /* Fall through to CONST_INT case. */
2310 if (GET_CODE (operands[1]) == CONST_INT)
2312 intval = INTVAL (operands[1]);
2314 if (VAL_14_BITS_P (intval))
2315 return "ldi %1,%0";
2316 else if ((intval & 0x7ff) == 0)
2317 return "ldil L'%1,%0";
2318 else if (pa_zdepi_cint_p (intval))
2319 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2320 else
2321 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2323 return "copy %1,%0";
2327 /* Compute position (in OP[1]) and width (in OP[2])
2328 useful for copying IMM to a register using the zdepi
2329 instructions. Store the immediate value to insert in OP[0]. */
2330 static void
2331 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2333 int lsb, len;
2335 /* Find the least significant set bit in IMM. */
2336 for (lsb = 0; lsb < 32; lsb++)
2338 if ((imm & 1) != 0)
2339 break;
2340 imm >>= 1;
2343 /* Choose variants based on *sign* of the 5-bit field. */
2344 if ((imm & 0x10) == 0)
2345 len = (lsb <= 28) ? 4 : 32 - lsb;
2346 else
2348 /* Find the width of the bitstring in IMM. */
2349 for (len = 5; len < 32 - lsb; len++)
2351 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2352 break;
2355 /* Sign extend IMM as a 5-bit value. */
2356 imm = (imm & 0xf) - 0x10;
2359 op[0] = imm;
2360 op[1] = 31 - lsb;
2361 op[2] = len;
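/* Worked illustration (value hypothetical): for IMM == 0x00f00000 the
   loop finds lsb == 20 and shifts IMM down to 0xf.  Bit 4 of the
   shifted value is clear, so len == 4 and OP becomes {15, 11, 4},
   i.e. "zdepi 15,11,4,%t": 15 deposited in a 4-bit field ending at
   bit 11 reconstructs 15 << 20 == 0x00f00000.  */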
2364 /* Compute position (in OP[1]) and width (in OP[2])
2365 useful for copying IMM to a register using the depdi,z
2366 instructions. Store the immediate value to insert in OP[0]. */
2368 static void
2369 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2371 int lsb, len, maxlen;
2373 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2375 /* Find the least significant set bit in IMM. */
2376 for (lsb = 0; lsb < maxlen; lsb++)
2378 if ((imm & 1) != 0)
2379 break;
2380 imm >>= 1;
2383 /* Choose variants based on *sign* of the 5-bit field. */
2384 if ((imm & 0x10) == 0)
2385 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2386 else
2388 /* Find the width of the bitstring in IMM. */
2389 for (len = 5; len < maxlen - lsb; len++)
2391 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2392 break;
2395 /* Extend length if host is narrow and IMM is negative. */
2396 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2397 len += 32;
2399 /* Sign extend IMM as a 5-bit value. */
2400 imm = (imm & 0xf) - 0x10;
2403 op[0] = imm;
2404 op[1] = 63 - lsb;
2405 op[2] = len;
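/* Worked illustration (value hypothetical): for IMM == 0xfc00 the
   loop finds lsb == 10 and shifts IMM down to 0x3f.  Bit 4 is set,
   so the width scan gives len == 6 and the value sign-extends to -1,
   making OP {-1, 53, 6}, i.e. "depdi -1,53,6,%t", which rebuilds
   0x3f << 10 == 0xfc00.  */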
2408 /* Output assembler code to perform a doubleword move insn
2409 with operands OPERANDS. */
2411 const char *
2412 pa_output_move_double (rtx *operands)
2414 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2415 rtx latehalf[2];
2416 rtx addreg0 = 0, addreg1 = 0;
2418 /* First classify both operands. */
2420 if (REG_P (operands[0]))
2421 optype0 = REGOP;
2422 else if (offsettable_memref_p (operands[0]))
2423 optype0 = OFFSOP;
2424 else if (GET_CODE (operands[0]) == MEM)
2425 optype0 = MEMOP;
2426 else
2427 optype0 = RNDOP;
2429 if (REG_P (operands[1]))
2430 optype1 = REGOP;
2431 else if (CONSTANT_P (operands[1]))
2432 optype1 = CNSTOP;
2433 else if (offsettable_memref_p (operands[1]))
2434 optype1 = OFFSOP;
2435 else if (GET_CODE (operands[1]) == MEM)
2436 optype1 = MEMOP;
2437 else
2438 optype1 = RNDOP;
2440 /* Check for the cases that the operand constraints are not
2441 supposed to allow. */
2442 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2444 /* Handle copies between general and floating registers. */
2446 if (optype0 == REGOP && optype1 == REGOP
2447 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2449 if (FP_REG_P (operands[0]))
2451 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2452 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2453 return "{fldds|fldd} -16(%%sp),%0";
2455 else
2457 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2458 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2459 return "{ldws|ldw} -12(%%sp),%R0";
2463 /* Handle auto decrementing and incrementing loads and stores
2464 specifically, since the structure of the function doesn't work
2465 for them without major modification. Do it better once we teach
2466 this port about the general inc/dec addressing of the PA.
2467 (This was written by tege. Chide him if it doesn't work.) */
2469 if (optype0 == MEMOP)
2471 /* We have to output the address syntax ourselves, since print_operand
2472 doesn't deal with the addresses we want to use. Fix this later. */
2474 rtx addr = XEXP (operands[0], 0);
2475 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2477 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2479 operands[0] = XEXP (addr, 0);
2480 gcc_assert (GET_CODE (operands[1]) == REG
2481 && GET_CODE (operands[0]) == REG);
2483 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2485 /* No overlap between high target register and address
2486 register. (We do this in a non-obvious way to
2487 save a register file writeback) */
2488 if (GET_CODE (addr) == POST_INC)
2489 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2490 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2492 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2494 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2496 operands[0] = XEXP (addr, 0);
2497 gcc_assert (GET_CODE (operands[1]) == REG
2498 && GET_CODE (operands[0]) == REG);
2500 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2501 /* No overlap between high target register and address
2502 register. (We do this in a non-obvious way to save a
2503 register file writeback) */
2504 if (GET_CODE (addr) == PRE_INC)
2505 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2506 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2509 if (optype1 == MEMOP)
2511 /* We have to output the address syntax ourselves, since print_operand
2512 doesn't deal with the addresses we want to use. Fix this later. */
2514 rtx addr = XEXP (operands[1], 0);
2515 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2517 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2519 operands[1] = XEXP (addr, 0);
2520 gcc_assert (GET_CODE (operands[0]) == REG
2521 && GET_CODE (operands[1]) == REG);
2523 if (!reg_overlap_mentioned_p (high_reg, addr))
2525 /* No overlap between high target register and address
2526 register. (We do this in a non-obvious way to
2527 save a register file writeback) */
2528 if (GET_CODE (addr) == POST_INC)
2529 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2530 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2532 else
2534 /* This is an undefined situation. We should load into the
2535 address register *and* update that register. Probably
2536 we don't need to handle this at all. */
2537 if (GET_CODE (addr) == POST_INC)
2538 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2539 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2542 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2544 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2546 operands[1] = XEXP (addr, 0);
2547 gcc_assert (GET_CODE (operands[0]) == REG
2548 && GET_CODE (operands[1]) == REG);
2550 if (!reg_overlap_mentioned_p (high_reg, addr))
2552 /* No overlap between high target register and address
2553 register. (We do this in a non-obvious way to
2554 save a register file writeback) */
2555 if (GET_CODE (addr) == PRE_INC)
2556 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2557 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2559 else
2561 /* This is an undefined situation. We should load into the
2562 address register *and* update that register. Probably
2563 we don't need to handle this at all. */
2564 if (GET_CODE (addr) == PRE_INC)
2565 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2566 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2569 else if (GET_CODE (addr) == PLUS
2570 && GET_CODE (XEXP (addr, 0)) == MULT)
2572 rtx xoperands[4];
2573 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2575 if (!reg_overlap_mentioned_p (high_reg, addr))
2577 xoperands[0] = high_reg;
2578 xoperands[1] = XEXP (addr, 1);
2579 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2580 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2581 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2582 xoperands);
2583 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2585 else
2587 xoperands[0] = high_reg;
2588 xoperands[1] = XEXP (addr, 1);
2589 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2590 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2591 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2592 xoperands);
2593 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2598 /* If an operand is an unoffsettable memory ref, find a register
2599 we can increment temporarily to make it refer to the second word. */
2601 if (optype0 == MEMOP)
2602 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2604 if (optype1 == MEMOP)
2605 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2607 /* Ok, we can do one word at a time.
2608 Normally we do the low-numbered word first.
2610 In either case, set up in LATEHALF the operands to use
2611 for the high-numbered word and in some cases alter the
2612 operands in OPERANDS to be suitable for the low-numbered word. */
2614 if (optype0 == REGOP)
2615 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2616 else if (optype0 == OFFSOP)
2617 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2618 else
2619 latehalf[0] = operands[0];
2621 if (optype1 == REGOP)
2622 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2623 else if (optype1 == OFFSOP)
2624 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2625 else if (optype1 == CNSTOP)
2626 split_double (operands[1], &operands[1], &latehalf[1]);
2627 else
2628 latehalf[1] = operands[1];
2630 /* If the first move would clobber the source of the second one,
2631 do them in the other order.
2633 This can happen in two cases:
2635 mem -> register where the first half of the destination register
2636 is the same register used in the memory's address. Reload
2637 can create such insns.
2639 mem in this case will be either register indirect or register
2640 indirect plus a valid offset.
2642 register -> register move where REGNO(dst) == REGNO(src) + 1
2643 someone (Tim/Tege?) claimed this can happen for parameter loads.
2645 Handle mem -> register case first. */
2646 if (optype0 == REGOP
2647 && (optype1 == MEMOP || optype1 == OFFSOP)
2648 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2649 operands[1], 0))
2651 /* Do the late half first. */
2652 if (addreg1)
2653 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2654 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2656 /* Then clobber. */
2657 if (addreg1)
2658 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2659 return pa_singlemove_string (operands);
2662 /* Now handle register -> register case. */
2663 if (optype0 == REGOP && optype1 == REGOP
2664 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2666 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2667 return pa_singlemove_string (operands);
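/* Illustration (registers hypothetical): copying the pair %r4/%r5
   into %r5/%r6 must move %r5 into %r6 before %r4 lands in %r5,
   which is why the late half is emitted first in this case.  */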
2670 /* Normal case: do the two words, low-numbered first. */
2672 output_asm_insn (pa_singlemove_string (operands), operands);
2674 /* Make any unoffsettable addresses point at high-numbered word. */
2675 if (addreg0)
2676 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2677 if (addreg1)
2678 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2680 /* Do that word. */
2681 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2683 /* Undo the adds we just did. */
2684 if (addreg0)
2685 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2686 if (addreg1)
2687 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2689 return "";
2692 const char *
2693 pa_output_fp_move_double (rtx *operands)
2695 if (FP_REG_P (operands[0]))
2697 if (FP_REG_P (operands[1])
2698 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2699 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2700 else
2701 output_asm_insn ("fldd%F1 %1,%0", operands);
2703 else if (FP_REG_P (operands[1]))
2705 output_asm_insn ("fstd%F0 %1,%0", operands);
2707 else
2709 rtx xoperands[2];
2711 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2713 /* This is a pain. You have to be prepared to deal with an
2714 arbitrary address here including pre/post increment/decrement.
2716 So avoid this in the MD. */
2717 gcc_assert (GET_CODE (operands[0]) == REG);
2719 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2720 xoperands[0] = operands[0];
2721 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2723 return "";
2726 /* Return a REG that occurs in ADDR with coefficient 1.
2727 ADDR can be effectively incremented by incrementing REG. */
2729 static rtx
2730 find_addr_reg (rtx addr)
2732 while (GET_CODE (addr) == PLUS)
2734 if (GET_CODE (XEXP (addr, 0)) == REG)
2735 addr = XEXP (addr, 0);
2736 else if (GET_CODE (XEXP (addr, 1)) == REG)
2737 addr = XEXP (addr, 1);
2738 else if (CONSTANT_P (XEXP (addr, 0)))
2739 addr = XEXP (addr, 1);
2740 else if (CONSTANT_P (XEXP (addr, 1)))
2741 addr = XEXP (addr, 0);
2742 else
2743 gcc_unreachable ();
2745 gcc_assert (GET_CODE (addr) == REG);
2746 return addr;
2749 /* Emit code to perform a block move.
2751 OPERANDS[0] is the destination pointer as a REG, clobbered.
2752 OPERANDS[1] is the source pointer as a REG, clobbered.
2753 OPERANDS[2] is a register for temporary storage.
2754 OPERANDS[3] is a register for temporary storage.
2755 OPERANDS[4] is the size as a CONST_INT
2756 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2757 OPERANDS[6] is another temporary register. */
2759 const char *
2760 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2762 int align = INTVAL (operands[5]);
2763 unsigned long n_bytes = INTVAL (operands[4]);
2765 /* We can't move more than a word at a time because the PA
2766 has no integer move insns wider than a word. (Could use fp mem ops?) */
2767 if (align > (TARGET_64BIT ? 8 : 4))
2768 align = (TARGET_64BIT ? 8 : 4);
2770 /* Note that we know each loop below will execute at least twice
2771 (else we would have open-coded the copy). */
2772 switch (align)
2774 case 8:
2775 /* Pre-adjust the loop counter. */
2776 operands[4] = GEN_INT (n_bytes - 16);
2777 output_asm_insn ("ldi %4,%2", operands);
2779 /* Copying loop. */
2780 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2781 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2782 output_asm_insn ("std,ma %3,8(%0)", operands);
2783 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2784 output_asm_insn ("std,ma %6,8(%0)", operands);
2786 /* Handle the residual. There could be up to 15 bytes of
2787 residual to copy! */
2788 if (n_bytes % 16 != 0)
2790 operands[4] = GEN_INT (n_bytes % 8);
2791 if (n_bytes % 16 >= 8)
2792 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2793 if (n_bytes % 8 != 0)
2794 output_asm_insn ("ldd 0(%1),%6", operands);
2795 if (n_bytes % 16 >= 8)
2796 output_asm_insn ("std,ma %3,8(%0)", operands);
2797 if (n_bytes % 8 != 0)
2798 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2800 return "";
2802 case 4:
2803 /* Pre-adjust the loop counter. */
2804 operands[4] = GEN_INT (n_bytes - 8);
2805 output_asm_insn ("ldi %4,%2", operands);
2807 /* Copying loop. */
2808 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2809 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2810 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2811 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2812 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2814 /* Handle the residual. There could be up to 7 bytes of
2815 residual to copy! */
2816 if (n_bytes % 8 != 0)
2818 operands[4] = GEN_INT (n_bytes % 4);
2819 if (n_bytes % 8 >= 4)
2820 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2821 if (n_bytes % 4 != 0)
2822 output_asm_insn ("ldw 0(%1),%6", operands);
2823 if (n_bytes % 8 >= 4)
2824 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2825 if (n_bytes % 4 != 0)
2826 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2828 return "";
2830 case 2:
2831 /* Pre-adjust the loop counter. */
2832 operands[4] = GEN_INT (n_bytes - 4);
2833 output_asm_insn ("ldi %4,%2", operands);
2835 /* Copying loop. */
2836 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2837 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2838 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2839 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2840 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2842 /* Handle the residual. */
2843 if (n_bytes % 4 != 0)
2845 if (n_bytes % 4 >= 2)
2846 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2847 if (n_bytes % 2 != 0)
2848 output_asm_insn ("ldb 0(%1),%6", operands);
2849 if (n_bytes % 4 >= 2)
2850 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2851 if (n_bytes % 2 != 0)
2852 output_asm_insn ("stb %6,0(%0)", operands);
2854 return "";
2856 case 1:
2857 /* Pre-adjust the loop counter. */
2858 operands[4] = GEN_INT (n_bytes - 2);
2859 output_asm_insn ("ldi %4,%2", operands);
2861 /* Copying loop. */
2862 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2863 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2864 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2865 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2866 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2868 /* Handle the residual. */
2869 if (n_bytes % 2 != 0)
2871 output_asm_insn ("ldb 0(%1),%3", operands);
2872 output_asm_insn ("stb %3,0(%0)", operands);
2874 return "";
2876 default:
2877 gcc_unreachable ();
2881 /* Count the number of insns necessary to handle this block move.
2883 Basic structure is the same as emit_block_move, except that we
2884 count insns rather than emit them. */
2886 static int
2887 compute_movmem_length (rtx insn)
2889 rtx pat = PATTERN (insn);
2890 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2891 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2892 unsigned int n_insns = 0;
2894 /* We can't move more than a word at a time because the PA
2895 has no integer move insns wider than a word. (Could use fp mem ops?) */
2896 if (align > (TARGET_64BIT ? 8 : 4))
2897 align = (TARGET_64BIT ? 8 : 4);
2899 /* The basic copying loop. */
2900 n_insns = 6;
2902 /* Residuals. */
2903 if (n_bytes % (2 * align) != 0)
2905 if ((n_bytes % (2 * align)) >= align)
2906 n_insns += 2;
2908 if ((n_bytes % align) != 0)
2909 n_insns += 2;
2912 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2913 return n_insns * 4;
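/* Example (size hypothetical): for align == 4 and n_bytes == 20, the
   base loop costs 6 insns and the residual (20 % 8 == 4 >= align)
   adds 2 more, giving 8 insns or 32 bytes, matching the sequence
   pa_output_block_move emits for the same operands.  */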
2916 /* Emit code to perform a block clear.
2918 OPERANDS[0] is the destination pointer as a REG, clobbered.
2919 OPERANDS[1] is a register for temporary storage.
2920 OPERANDS[2] is the size as a CONST_INT
2921 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2923 const char *
2924 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2926 int align = INTVAL (operands[3]);
2927 unsigned long n_bytes = INTVAL (operands[2]);
2929 /* We can't clear more than a word at a time because the PA
2930 has no integer move insns wider than a word. */
2931 if (align > (TARGET_64BIT ? 8 : 4))
2932 align = (TARGET_64BIT ? 8 : 4);
2934 /* Note that we know each loop below will execute at least twice
2935 (else we would have open-coded the copy). */
2936 switch (align)
2938 case 8:
2939 /* Pre-adjust the loop counter. */
2940 operands[2] = GEN_INT (n_bytes - 16);
2941 output_asm_insn ("ldi %2,%1", operands);
2943 /* Loop. */
2944 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2945 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2946 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2948 /* Handle the residual. There could be up to 15 bytes of
2949 residual to copy! */
2950 if (n_bytes % 16 != 0)
2952 operands[2] = GEN_INT (n_bytes % 8);
2953 if (n_bytes % 16 >= 8)
2954 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2955 if (n_bytes % 8 != 0)
2956 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2958 return "";
2960 case 4:
2961 /* Pre-adjust the loop counter. */
2962 operands[2] = GEN_INT (n_bytes - 8);
2963 output_asm_insn ("ldi %2,%1", operands);
2965 /* Loop. */
2966 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2967 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2968 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2970 /* Handle the residual. There could be up to 7 bytes of
2971 residual to copy! */
2972 if (n_bytes % 8 != 0)
2974 operands[2] = GEN_INT (n_bytes % 4);
2975 if (n_bytes % 8 >= 4)
2976 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2977 if (n_bytes % 4 != 0)
2978 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2980 return "";
2982 case 2:
2983 /* Pre-adjust the loop counter. */
2984 operands[2] = GEN_INT (n_bytes - 4);
2985 output_asm_insn ("ldi %2,%1", operands);
2987 /* Loop. */
2988 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2989 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2990 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2992 /* Handle the residual. */
2993 if (n_bytes % 4 != 0)
2995 if (n_bytes % 4 >= 2)
2996 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2997 if (n_bytes % 2 != 0)
2998 output_asm_insn ("stb %%r0,0(%0)", operands);
3000 return "";
3002 case 1:
3003 /* Pre-adjust the loop counter. */
3004 operands[2] = GEN_INT (n_bytes - 2);
3005 output_asm_insn ("ldi %2,%1", operands);
3007 /* Loop. */
3008 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3009 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3010 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3012 /* Handle the residual. */
3013 if (n_bytes % 2 != 0)
3014 output_asm_insn ("stb %%r0,0(%0)", operands);
3016 return "";
3018 default:
3019 gcc_unreachable ();
3023 /* Count the number of insns necessary to handle this block clear.
3025 Basic structure is the same as emit_block_move, except that we
3026 count insns rather than emit them. */
3028 static int
3029 compute_clrmem_length (rtx insn)
3031 rtx pat = PATTERN (insn);
3032 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3033 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3034 unsigned int n_insns = 0;
3036 /* We can't clear more than a word at a time because the PA
3037 has no integer move insns wider than a word. */
3038 if (align > (TARGET_64BIT ? 8 : 4))
3039 align = (TARGET_64BIT ? 8 : 4);
3041 /* The basic loop. */
3042 n_insns = 4;
3044 /* Residuals. */
3045 if (n_bytes % (2 * align) != 0)
3047 if ((n_bytes % (2 * align)) >= align)
3048 n_insns++;
3050 if ((n_bytes % align) != 0)
3051 n_insns++;
3054 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3055 return n_insns * 4;
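/* Example (size hypothetical): for align == 4 and n_bytes == 20, the
   base loop costs 4 insns and the residual (20 % 8 == 4 >= align)
   adds one more, giving 5 insns or 20 bytes for the corresponding
   pa_output_block_clear sequence.  */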
3059 const char *
3060 pa_output_and (rtx *operands)
3062 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3064 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3065 int ls0, ls1, ms0, p, len;
3067 for (ls0 = 0; ls0 < 32; ls0++)
3068 if ((mask & (1 << ls0)) == 0)
3069 break;
3071 for (ls1 = ls0; ls1 < 32; ls1++)
3072 if ((mask & (1 << ls1)) != 0)
3073 break;
3075 for (ms0 = ls1; ms0 < 32; ms0++)
3076 if ((mask & (1 << ms0)) == 0)
3077 break;
3079 gcc_assert (ms0 == 32);
3081 if (ls1 == 32)
3083 len = ls0;
3085 gcc_assert (len);
3087 operands[2] = GEN_INT (len);
3088 return "{extru|extrw,u} %1,31,%2,%0";
3090 else
3092 /* We could use this `depi' for the case above as well, but `depi'
3093 requires one more register file access than an `extru'. */
3095 p = 31 - ls0;
3096 len = ls1 - ls0;
3098 operands[2] = GEN_INT (p);
3099 operands[3] = GEN_INT (len);
3100 return "{depi|depwi} 0,%2,%3,%0";
3103 else
3104 return "and %1,%2,%0";
3107 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3108 storing the result in operands[0]. */
3109 const char *
3110 pa_output_64bit_and (rtx *operands)
3112 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3114 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3115 int ls0, ls1, ms0, p, len;
3117 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3118 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3119 break;
3121 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3122 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3123 break;
3125 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3126 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3127 break;
3129 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3131 if (ls1 == HOST_BITS_PER_WIDE_INT)
3133 len = ls0;
3135 gcc_assert (len);
3137 operands[2] = GEN_INT (len);
3138 return "extrd,u %1,63,%2,%0";
3140 else
3142 /* We could use this `depdi' for the case above as well, but `depdi'
3143 requires one more register file access than an `extrd'. */
3145 p = 63 - ls0;
3146 len = ls1 - ls0;
3148 operands[2] = GEN_INT (p);
3149 operands[3] = GEN_INT (len);
3150 return "depdi 0,%2,%3,%0";
3153 else
3154 return "and %1,%2,%0";
3157 const char *
3158 pa_output_ior (rtx *operands)
3160 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3161 int bs0, bs1, p, len;
3163 if (INTVAL (operands[2]) == 0)
3164 return "copy %1,%0";
3166 for (bs0 = 0; bs0 < 32; bs0++)
3167 if ((mask & (1 << bs0)) != 0)
3168 break;
3170 for (bs1 = bs0; bs1 < 32; bs1++)
3171 if ((mask & (1 << bs1)) == 0)
3172 break;
3174 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3176 p = 31 - bs0;
3177 len = bs1 - bs0;
3179 operands[2] = GEN_INT (p);
3180 operands[3] = GEN_INT (len);
3181 return "{depi|depwi} -1,%2,%3,%0";
3184 /* Return a string to perform a bitwise-ior of operands[1] with operands[2]
3185 storing the result in operands[0]. */
3186 const char *
3187 pa_output_64bit_ior (rtx *operands)
3189 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3190 int bs0, bs1, p, len;
3192 if (INTVAL (operands[2]) == 0)
3193 return "copy %1,%0";
3195 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3196 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3197 break;
3199 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3200 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3201 break;
3203 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3204 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3206 p = 63 - bs0;
3207 len = bs1 - bs0;
3209 operands[2] = GEN_INT (p);
3210 operands[3] = GEN_INT (len);
3211 return "depdi -1,%2,%3,%0";
3214 /* Target hook for assembling integer objects. This code handles
3215 aligned SI and DI integers specially since function references
3216 must be preceded by P%. */
3218 static bool
3219 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3221 if (size == UNITS_PER_WORD
3222 && aligned_p
3223 && function_label_operand (x, VOIDmode))
3225 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3227 /* We don't want an OPD when generating fast indirect calls. */
3228 if (!TARGET_FAST_INDIRECT_CALLS)
3229 fputs ("P%", asm_out_file);
3231 output_addr_const (asm_out_file, x);
3232 fputc ('\n', asm_out_file);
3233 return true;
3235 return default_assemble_integer (x, size, aligned_p);
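/* For example (name hypothetical), a word-sized aligned reference to
   a function "foo" is emitted as "\t.word\tP%foo" on 32-bit targets;
   the P% prefix asks the linker to substitute a plabel (function
   descriptor) for the raw code address.  */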
3238 /* Output an ascii string. */
3239 void
3240 pa_output_ascii (FILE *file, const char *p, int size)
3242 int i;
3243 int chars_output;
3244 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3246 /* The HP assembler can only take strings of 256 characters at one
3247 time. This is a limitation on input line length, *not* the
3248 length of the string. Sigh. Even worse, it seems that the
3249 restriction is in number of input characters (see \xnn &
3250 \whatever). So we have to do this very carefully. */
3252 fputs ("\t.STRING \"", file);
3254 chars_output = 0;
3255 for (i = 0; i < size; i += 4)
3257 int co = 0;
3258 int io = 0;
3259 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3261 register unsigned int c = (unsigned char) p[i + io];
3263 if (c == '\"' || c == '\\')
3264 partial_output[co++] = '\\';
3265 if (c >= ' ' && c < 0177)
3266 partial_output[co++] = c;
3267 else
3269 unsigned int hexd;
3270 partial_output[co++] = '\\';
3271 partial_output[co++] = 'x';
3272 hexd = c / 16 - 0 + '0';
3273 if (hexd > '9')
3274 hexd -= '9' - 'a' + 1;
3275 partial_output[co++] = hexd;
3276 hexd = c % 16 - 0 + '0';
3277 if (hexd > '9')
3278 hexd -= '9' - 'a' + 1;
3279 partial_output[co++] = hexd;
3282 if (chars_output + co > 243)
3284 fputs ("\"\n\t.STRING \"", file);
3285 chars_output = 0;
3287 fwrite (partial_output, 1, (size_t) co, file);
3288 chars_output += co;
3289 co = 0;
3291 fputs ("\"\n", file);
3294 /* Try to rewrite floating point comparisons & branches to avoid
3295 useless add,tr insns.
3297 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3298 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3299 first attempt to remove useless add,tr insns. It is zero
3300 for the second pass as reorg sometimes leaves bogus REG_DEAD
3301 notes lying around.
3303 When CHECK_NOTES is zero we can only eliminate add,tr insns
3304 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3305 instructions. */
3306 static void
3307 remove_useless_addtr_insns (int check_notes)
3309 rtx_insn *insn;
3310 static int pass = 0;
3312 /* This is fairly cheap, so always run it when optimizing. */
3313 if (optimize > 0)
3315 int fcmp_count = 0;
3316 int fbranch_count = 0;
3318 /* Walk all the insns in this function looking for fcmp & fbranch
3319 instructions. Keep track of how many of each we find. */
3320 for (insn = get_insns (); insn; insn = next_insn (insn))
3322 rtx tmp;
3324 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3325 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3326 continue;
3328 tmp = PATTERN (insn);
3330 /* It must be a set. */
3331 if (GET_CODE (tmp) != SET)
3332 continue;
3334 /* If the destination is CCFP, then we've found an fcmp insn. */
3335 tmp = SET_DEST (tmp);
3336 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3338 fcmp_count++;
3339 continue;
3342 tmp = PATTERN (insn);
3343 /* If this is an fbranch instruction, bump the fbranch counter. */
3344 if (GET_CODE (tmp) == SET
3345 && SET_DEST (tmp) == pc_rtx
3346 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3347 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3348 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3349 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3351 fbranch_count++;
3352 continue;
3357 /* Find all floating point compare + branch insns. If possible,
3358 reverse the comparison & the branch to avoid add,tr insns. */
3359 for (insn = get_insns (); insn; insn = next_insn (insn))
3361 rtx tmp;
3362 rtx_insn *next;
3364 /* Ignore anything that isn't an INSN. */
3365 if (! NONJUMP_INSN_P (insn))
3366 continue;
3368 tmp = PATTERN (insn);
3370 /* It must be a set. */
3371 if (GET_CODE (tmp) != SET)
3372 continue;
3374 /* The destination must be CCFP, which is register zero. */
3375 tmp = SET_DEST (tmp);
3376 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3377 continue;
3379 /* INSN should be a set of CCFP.
3381 See if the result of this insn is used in a reversed FP
3382 conditional branch. If so, reverse our condition and
3383 the branch. Doing so avoids useless add,tr insns. */
3384 next = next_insn (insn);
3385 while (next)
3387 /* Jumps, calls and labels stop our search. */
3388 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3389 break;
3391 /* As does another fcmp insn. */
3392 if (NONJUMP_INSN_P (next)
3393 && GET_CODE (PATTERN (next)) == SET
3394 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3395 && REGNO (SET_DEST (PATTERN (next))) == 0)
3396 break;
3398 next = next_insn (next);
3401 /* Is NEXT_INSN a branch? */
3402 if (next && JUMP_P (next))
3404 rtx pattern = PATTERN (next);
3406 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3407 and CCFP dies, then reverse our conditional and the branch
3408 to avoid the add,tr. */
3409 if (GET_CODE (pattern) == SET
3410 && SET_DEST (pattern) == pc_rtx
3411 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3412 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3413 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3414 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3415 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3416 && (fcmp_count == fbranch_count
3417 || (check_notes
3418 && find_regno_note (next, REG_DEAD, 0))))
3420 /* Reverse the branch. */
3421 tmp = XEXP (SET_SRC (pattern), 1);
3422 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3423 XEXP (SET_SRC (pattern), 2) = tmp;
3424 INSN_CODE (next) = -1;
3426 /* Reverse our condition. */
3427 tmp = PATTERN (insn);
3428 PUT_CODE (XEXP (tmp, 1),
3429 (reverse_condition_maybe_unordered
3430 (GET_CODE (XEXP (tmp, 1)))));
3436 pass = !pass;
3440 /* You may have trouble believing this, but this is the 32 bit HP-PA
3441 stack layout. Wow.
3443 Offset Contents
3445 Variable arguments (optional; any number may be allocated)
3447 SP-(4*(N+9)) arg word N
3449 SP-56 arg word 5
3450 SP-52 arg word 4
3452 Fixed arguments (must be allocated; may remain unused)
3454 SP-48 arg word 3
3455 SP-44 arg word 2
3456 SP-40 arg word 1
3457 SP-36 arg word 0
3459 Frame Marker
3461 SP-32 External Data Pointer (DP)
3462 SP-28 External sr4
3463 SP-24 External/stub RP (RP')
3464 SP-20 Current RP
3465 SP-16 Static Link
3466 SP-12 Clean up
3467 SP-8 Calling Stub RP (RP'')
3468 SP-4 Previous SP
3470 Top of Frame
3472 SP-0 Stack Pointer (points to next available address)
3476 /* This function saves registers as follows. Registers marked with ' are
3477 this function's registers (as opposed to the previous function's).
3478 If a frame_pointer isn't needed, r4 is saved as a general register;
3479 the space for the frame pointer is still allocated, though, to keep
3480 things simple.
3483 Top of Frame
3485 SP (FP') Previous FP
3486 SP + 4 Alignment filler (sigh)
3487 SP + 8 Space for locals reserved here.
3491 SP + n All call saved registers used.
3495 SP + o All call saved fp registers used.
3499 SP + p (SP') points to next available address.
3503 /* Global variables set by output_function_prologue(). */
3504 /* Size of frame. Need to know this to emit return insns from
3505 leaf procedures. */
3506 static HOST_WIDE_INT actual_fsize, local_fsize;
3507 static int save_fregs;
3509 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3510 Handle case where DISP > 8k by using the add_high_const patterns.
3512 Note in DISP > 8k case, we will leave the high part of the address
3513 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/
3515 static void
3516 store_reg (int reg, HOST_WIDE_INT disp, int base)
3518 rtx insn, dest, src, basereg;
3520 src = gen_rtx_REG (word_mode, reg);
3521 basereg = gen_rtx_REG (Pmode, base);
3522 if (VAL_14_BITS_P (disp))
3524 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3525 insn = emit_move_insn (dest, src);
3527 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3529 rtx delta = GEN_INT (disp);
3530 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3532 emit_move_insn (tmpreg, delta);
3533 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3534 if (DO_FRAME_NOTES)
3536 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3537 gen_rtx_SET (VOIDmode, tmpreg,
3538 gen_rtx_PLUS (Pmode, basereg, delta)));
3539 RTX_FRAME_RELATED_P (insn) = 1;
3541 dest = gen_rtx_MEM (word_mode, tmpreg);
3542 insn = emit_move_insn (dest, src);
3544 else
3546 rtx delta = GEN_INT (disp);
3547 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3548 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3550 emit_move_insn (tmpreg, high);
3551 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3552 insn = emit_move_insn (dest, src);
3553 if (DO_FRAME_NOTES)
3554 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3555 gen_rtx_SET (VOIDmode,
3556 gen_rtx_MEM (word_mode,
3557 gen_rtx_PLUS (word_mode,
3558 basereg,
3559 delta)),
3560 src));
3563 if (DO_FRAME_NOTES)
3564 RTX_FRAME_RELATED_P (insn) = 1;
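/* Illustration (displacement hypothetical): on 32-bit targets a call
   such as store_reg (4, 0x12345, STACK_POINTER_REGNUM) takes the
   final branch above and assembles to roughly

       addil L'0x12345,%r30
       stw %r4,R'0x12345(%r1)

   leaving the high part of the address in %r1 as promised.  */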
3567 /* Emit RTL to store REG at the memory location specified by BASE and then
3568 add MOD to BASE. MOD must be <= 8k. */
3570 static void
3571 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3573 rtx insn, basereg, srcreg, delta;
3575 gcc_assert (VAL_14_BITS_P (mod));
3577 basereg = gen_rtx_REG (Pmode, base);
3578 srcreg = gen_rtx_REG (word_mode, reg);
3579 delta = GEN_INT (mod);
3581 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3582 if (DO_FRAME_NOTES)
3584 RTX_FRAME_RELATED_P (insn) = 1;
3586 /* RTX_FRAME_RELATED_P must be set on each frame related set
3587 in a parallel with more than one element. */
3588 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3589 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3593 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3594 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3595 whether to add a frame note or not.
3597 In the DISP > 8k case, we leave the high part of the address in %r1.
3598 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3600 static void
3601 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3603 rtx insn;
3605 if (VAL_14_BITS_P (disp))
3607 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3608 plus_constant (Pmode,
3609 gen_rtx_REG (Pmode, base), disp));
3611 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3613 rtx basereg = gen_rtx_REG (Pmode, base);
3614 rtx delta = GEN_INT (disp);
3615 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3617 emit_move_insn (tmpreg, delta);
3618 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3619 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3620 if (DO_FRAME_NOTES)
3621 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3622 gen_rtx_SET (VOIDmode, tmpreg,
3623 gen_rtx_PLUS (Pmode, basereg, delta)));
3625 else
3627 rtx basereg = gen_rtx_REG (Pmode, base);
3628 rtx delta = GEN_INT (disp);
3629 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3631 emit_move_insn (tmpreg,
3632 gen_rtx_PLUS (Pmode, basereg,
3633 gen_rtx_HIGH (Pmode, delta)));
3634 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3635 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3638 if (DO_FRAME_NOTES && note)
3639 RTX_FRAME_RELATED_P (insn) = 1;
3642 HOST_WIDE_INT
3643 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3645 int freg_saved = 0;
3646 int i, j;
3648 /* The code in pa_expand_prologue and pa_expand_epilogue must
3649 be consistent with the rounding and size calculation done here.
3650 Change them at the same time. */
3652 /* We do our own stack alignment. First, round the size of the
3653 stack locals up to a word boundary. */
3654 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3656 /* Space for previous frame pointer + filler. If any frame is
3657 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3658 waste some space here for the sake of HP compatibility. The
3659 first slot is only used when the frame pointer is needed. */
3660 if (size || frame_pointer_needed)
3661 size += STARTING_FRAME_OFFSET;
3663 /* If the current function calls __builtin_eh_return, then we need
3664 to allocate stack space for registers that will hold data for
3665 the exception handler. */
3666 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3668 unsigned int i;
3670 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3671 continue;
3672 size += i * UNITS_PER_WORD;
3675 /* Account for space used by the callee general register saves. */
3676 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3677 if (df_regs_ever_live_p (i))
3678 size += UNITS_PER_WORD;
3680 /* Account for space used by the callee floating point register saves. */
3681 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3682 if (df_regs_ever_live_p (i)
3683 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3685 freg_saved = 1;
3687 /* We always save both halves of the FP register, so always
3688 increment the frame size by 8 bytes. */
3689 size += 8;
3692 /* If any of the floating registers are saved, account for the
3693 alignment needed for the floating point register save block. */
3694 if (freg_saved)
3696 size = (size + 7) & ~7;
3697 if (fregs_live)
3698 *fregs_live = 1;
3701 /* The various ABIs include space for the outgoing parameters in the
3702 size of the current function's stack frame. We don't need to align
3703 for the outgoing arguments as their alignment is set by the final
3704 rounding for the frame as a whole. */
3705 size += crtl->outgoing_args_size;
3707 /* Allocate space for the fixed frame marker. This space must be
3708 allocated for any function that makes calls or allocates
3709 stack space. */
3710 if (!crtl->is_leaf || size)
3711 size += TARGET_64BIT ? 48 : 32;
3713 /* Finally, round to the preferred stack boundary. */
3714 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3715 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
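/* Worked illustration (numbers hypothetical; assumes the 32-bit ABI
   values STARTING_FRAME_OFFSET == 8 and a 64-byte preferred stack
   boundary): a non-leaf function with 10 bytes of locals and no
   register saves or outgoing arguments rounds the locals to 12, adds
   8 for the frame pointer slot and filler, adds 32 for the fixed
   frame marker, and rounds the resulting 52 up to a 64-byte frame.  */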
3718 /* Generate the assembly code for function entry. FILE is a stdio
3719 stream to output the code to. SIZE is an int: how many units of
3720 temporary storage to allocate.
3722 Refer to the array `regs_ever_live' to determine which registers to
3723 save; `regs_ever_live[I]' is nonzero if register number I is ever
3724 used in the function. This function is responsible for knowing
3725 which registers should not be saved even if used. */
3727 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3728 of memory. If any fpu reg is used in the function, we allocate
3729 such a block here, at the bottom of the frame, just in case it's needed.
3731 If this function is a leaf procedure, then we may choose not
3732 to do a "save" insn. The decision about whether or not
3733 to do this is made in regclass.c. */
3735 static void
3736 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3738 /* The function's label and associated .PROC must never be
3739 separated and must be output *after* any profiling declarations
3740 to avoid changing spaces/subspaces within a procedure. */
3741 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3742 fputs ("\t.PROC\n", file);
3744 /* pa_expand_prologue does the dirty work now. We just need
3745 to output the assembler directives which denote the start
3746 of a function. */
3747 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3748 if (crtl->is_leaf)
3749 fputs (",NO_CALLS", file);
3750 else
3751 fputs (",CALLS", file);
3752 if (rp_saved)
3753 fputs (",SAVE_RP", file);
3755 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3756 at the beginning of the frame and that it is used as the frame
3757 pointer for the frame. We do this because our current frame
3758 layout doesn't conform to that specified in the HP runtime
3759 documentation and we need a way to indicate to programs such as
3760 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3761 isn't used by HP compilers but is supported by the assembler.
3762 However, SAVE_SP is supposed to indicate that the previous stack
3763 pointer has been saved in the frame marker. */
3764 if (frame_pointer_needed)
3765 fputs (",SAVE_SP", file);
3767 /* Pass on information about the number of callee register saves
3768 performed in the prologue.
3770 The compiler is supposed to pass the highest register number
3771 saved, the assembler then has to adjust that number before
3772 entering it into the unwind descriptor (to account for any
3773 caller saved registers with lower register numbers than the
3774 first callee saved register). */
3775 if (gr_saved)
3776 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3778 if (fr_saved)
3779 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
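/* Worked example of the adjustments above: callee GR saves start at
   %r3 and callee FP saves at %fr12 (32-bit runtime), so gr_saved of 5
   yields ENTRY_GR=7 (%r3..%r7) and fr_saved of 2 yields ENTRY_FR=13
   (%fr12..%fr13).  */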
3781 fputs ("\n\t.ENTRY\n", file);
3783 remove_useless_addtr_insns (0);
3786 void
3787 pa_expand_prologue (void)
3789 int merge_sp_adjust_with_store = 0;
3790 HOST_WIDE_INT size = get_frame_size ();
3791 HOST_WIDE_INT offset;
3792 int i;
3793 rtx insn, tmpreg;
3795 gr_saved = 0;
3796 fr_saved = 0;
3797 save_fregs = 0;
3799 /* Compute total size for frame pointer, filler, locals and rounding to
3800 the next word boundary. Similar code appears in pa_compute_frame_size
3801 and must be changed in tandem with this code. */
3802 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3803 if (local_fsize || frame_pointer_needed)
3804 local_fsize += STARTING_FRAME_OFFSET;
3806 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3807 if (flag_stack_usage_info)
3808 current_function_static_stack_size = actual_fsize;
3810 /* Compute a few things we will use often. */
3811 tmpreg = gen_rtx_REG (word_mode, 1);
3813 /* Save RP first. The calling conventions manual states RP will
3814 always be stored into the caller's frame at sp - 20 or sp - 16
3815 depending on which ABI is in use. */
3816 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3818 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3819 rp_saved = true;
3821 else
3822 rp_saved = false;
3824 /* Allocate the local frame and set up the frame pointer if needed. */
3825 if (actual_fsize != 0)
3827 if (frame_pointer_needed)
3829 /* Copy the old frame pointer temporarily into %r1. Set up the
3830 new stack pointer, then store away the saved old frame pointer
3831 into the stack at sp and at the same time update the stack
3832 pointer by actual_fsize bytes. Two versions: the first
3833 handles small (<8k) frames, the second handles large (>=8k)
3834 frames. */
3835 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3836 if (DO_FRAME_NOTES)
3837 RTX_FRAME_RELATED_P (insn) = 1;
3839 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3840 if (DO_FRAME_NOTES)
3841 RTX_FRAME_RELATED_P (insn) = 1;
3843 if (VAL_14_BITS_P (actual_fsize))
3844 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3845 else
3847 /* It is incorrect to store the saved frame pointer at *sp,
3848 then increment sp (writes beyond the current stack boundary).
3850 So instead use stwm to store at *sp and post-increment the
3851 stack pointer as an atomic operation. Then increment sp to
3852 finish allocating the new frame. */
3853 HOST_WIDE_INT adjust1 = 8192 - 64;
3854 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3856 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3857 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3858 adjust2, 1);
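/* Worked example: for actual_fsize of 20000, adjust1 = 8192 - 64
   = 8128 fits the 14-bit displacement of the atomic stwm, and the
   remaining adjust2 = 11872 is then added to the stack pointer.  */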
3861 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3862 we need to store the previous stack pointer (frame pointer)
3863 into the frame marker on targets that use the HP unwind
3864 library. This allows the HP unwind library to be used to
3865 unwind GCC frames. However, we are not fully compatible
3866 with the HP library because our frame layout differs from
3867 that specified in the HP runtime specification.
3869 We don't want a frame note on this instruction as the frame
3870 marker moves during dynamic stack allocation.
3872 This instruction also serves as a blockage to prevent
3873 register spills from being scheduled before the stack
3874 pointer is raised. This is necessary as we store
3875 registers using the frame pointer as a base register,
3876 and the frame pointer is set before sp is raised. */
3877 if (TARGET_HPUX_UNWIND_LIBRARY)
3879 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3880 GEN_INT (TARGET_64BIT ? -8 : -4));
3882 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3883 hard_frame_pointer_rtx);
3885 else
3886 emit_insn (gen_blockage ());
3888 /* No frame pointer needed. */
3889 else
3891 /* In some cases we can perform the first callee register save
3892 and allocating the stack frame at the same time. If so, just
3893 make a note of it and defer allocating the frame until saving
3894 the callee registers. */
3895 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3896 merge_sp_adjust_with_store = 1;
3897 /* Cannot optimize. Adjust the stack frame by actual_fsize
3898 bytes. */
3899 else
3900 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3901 actual_fsize, 1);
3905 /* Normal register save.
3907 Do not save the frame pointer in the frame_pointer_needed case. It
3908 was done earlier. */
3909 if (frame_pointer_needed)
3911 offset = local_fsize;
3913 /* Saving the EH return data registers in the frame is the simplest
3914 way to get the frame unwind information emitted. We put them
3915 just before the general registers. */
3916 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3918 unsigned int i, regno;
3920 for (i = 0; ; ++i)
3922 regno = EH_RETURN_DATA_REGNO (i);
3923 if (regno == INVALID_REGNUM)
3924 break;
3926 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3927 offset += UNITS_PER_WORD;
3931 for (i = 18; i >= 4; i--)
3932 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3934 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3935 offset += UNITS_PER_WORD;
3936 gr_saved++;
3938 /* Account for %r3 which is saved in a special place. */
3939 gr_saved++;
3941 /* No frame pointer needed. */
3942 else
3944 offset = local_fsize - actual_fsize;
3946 /* Saving the EH return data registers in the frame is the simplest
3947 way to get the frame unwind information emitted. */
3948 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3950 unsigned int i, regno;
3952 for (i = 0; ; ++i)
3954 regno = EH_RETURN_DATA_REGNO (i);
3955 if (regno == INVALID_REGNUM)
3956 break;
3958 /* If merge_sp_adjust_with_store is nonzero, then we can
3959 optimize the first save. */
3960 if (merge_sp_adjust_with_store)
3962 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3963 merge_sp_adjust_with_store = 0;
3965 else
3966 store_reg (regno, offset, STACK_POINTER_REGNUM);
3967 offset += UNITS_PER_WORD;
3971 for (i = 18; i >= 3; i--)
3972 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3974 /* If merge_sp_adjust_with_store is nonzero, then we can
3975 optimize the first GR save. */
3976 if (merge_sp_adjust_with_store)
3978 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3979 merge_sp_adjust_with_store = 0;
3981 else
3982 store_reg (i, offset, STACK_POINTER_REGNUM);
3983 offset += UNITS_PER_WORD;
3984 gr_saved++;
3987 /* If we wanted to merge the SP adjustment with a GR save, but we never
3988 did any GR saves, then just emit the adjustment here. */
3989 if (merge_sp_adjust_with_store)
3990 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3991 actual_fsize, 1);
3994 /* The hppa calling conventions say that %r19, the pic offset
3995 register, is saved at sp - 32 (in this function's frame)
3996 when generating PIC code. FIXME: What is the correct thing
3997 to do for functions which make no calls and allocate no
3998 frame? Do we need to allocate a frame, or can we just omit
3999 the save? For now we'll just omit the save.
4001 We don't want a note on this insn as the frame marker can
4002 move if there is a dynamic stack allocation. */
4003 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4005 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4007 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4011 /* Align pointer properly (doubleword boundary). */
4012 offset = (offset + 7) & ~7;
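/* For instance, an offset of 36 rounds up to 40 here; the FP save
   area must start on an 8-byte (doubleword) boundary.  */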
4014 /* Floating point register store. */
4015 if (save_fregs)
4017 rtx base;
4019 /* First get the frame or stack pointer to the start of the FP register
4020 save area. */
4021 if (frame_pointer_needed)
4023 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4024 base = hard_frame_pointer_rtx;
4026 else
4028 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4029 base = stack_pointer_rtx;
4032 /* Now actually save the FP registers. */
4033 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4035 if (df_regs_ever_live_p (i)
4036 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4038 rtx addr, insn, reg;
4039 addr = gen_rtx_MEM (DFmode,
4040 gen_rtx_POST_INC (word_mode, tmpreg));
4041 reg = gen_rtx_REG (DFmode, i);
4042 insn = emit_move_insn (addr, reg);
4043 if (DO_FRAME_NOTES)
4045 RTX_FRAME_RELATED_P (insn) = 1;
4046 if (TARGET_64BIT)
4048 rtx mem = gen_rtx_MEM (DFmode,
4049 plus_constant (Pmode, base,
4050 offset));
4051 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4052 gen_rtx_SET (VOIDmode, mem, reg));
4054 else
4056 rtx meml = gen_rtx_MEM (SFmode,
4057 plus_constant (Pmode, base,
4058 offset));
4059 rtx memr = gen_rtx_MEM (SFmode,
4060 plus_constant (Pmode, base,
4061 offset + 4));
4062 rtx regl = gen_rtx_REG (SFmode, i);
4063 rtx regr = gen_rtx_REG (SFmode, i + 1);
4064 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4065 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4066 rtvec vec;
4068 RTX_FRAME_RELATED_P (setl) = 1;
4069 RTX_FRAME_RELATED_P (setr) = 1;
4070 vec = gen_rtvec (2, setl, setr);
4071 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4072 gen_rtx_SEQUENCE (VOIDmode, vec));
4075 offset += GET_MODE_SIZE (DFmode);
4076 fr_saved++;
4082 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4083 Handle case where DISP > 8k by using the add_high_const patterns. */
4085 static void
4086 load_reg (int reg, HOST_WIDE_INT disp, int base)
4088 rtx dest = gen_rtx_REG (word_mode, reg);
4089 rtx basereg = gen_rtx_REG (Pmode, base);
4090 rtx src;
4092 if (VAL_14_BITS_P (disp))
4093 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4094 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4096 rtx delta = GEN_INT (disp);
4097 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4099 emit_move_insn (tmpreg, delta);
4100 if (TARGET_DISABLE_INDEXING)
4102 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4103 src = gen_rtx_MEM (word_mode, tmpreg);
4105 else
4106 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4108 else
4110 rtx delta = GEN_INT (disp);
4111 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4112 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4114 emit_move_insn (tmpreg, high);
4115 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4118 emit_move_insn (dest, src);
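/* Summary of the three cases above: a 14-bit displacement is used
   directly in the memory reference; a displacement that does not even
   fit in 32 bits (64-bit only) is materialized in %r1 and used either
   indexed or, with indexing disabled, added into %r1 first; otherwise
   the displacement is split across a HIGH/LO_SUM pair through %r1.  */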
4121 /* Update the total code bytes output to the text section. */
4123 static void
4124 update_total_code_bytes (unsigned int nbytes)
4126 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4127 && !IN_NAMED_SECTION_P (cfun->decl))
4129 unsigned int old_total = total_code_bytes;
4131 total_code_bytes += nbytes;
4133 /* Be prepared to handle overflows. */
4134 if (old_total > total_code_bytes)
4135 total_code_bytes = UINT_MAX;
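/* The check above relies on unsigned wraparound: nbytes is
   non-negative, so the new total can only compare less than the old
   one if the addition overflowed, in which case the count is
   saturated at UINT_MAX.  */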
4139 /* This function generates the assembly code for function exit.
4140 Args are as for output_function_prologue ().
4142 The function epilogue should not depend on the current stack
4143 pointer! It should use the frame pointer only. This is mandatory
4144 because of alloca; we also take advantage of it to omit stack
4145 adjustments before returning. */
4147 static void
4148 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4150 rtx_insn *insn = get_last_insn ();
4151 bool extra_nop;
4153 /* pa_expand_epilogue does the dirty work now. We just need
4154 to output the assembler directives which denote the end
4155 of a function.
4157 To make debuggers happy, emit a nop if the epilogue was completely
4158 eliminated due to a volatile call as the last insn in the
4159 current function. That way the return address (in %r2) will
4160 always point to a valid instruction in the current function. */
4162 /* Get the last real insn. */
4163 if (NOTE_P (insn))
4164 insn = prev_real_insn (insn);
4166 /* If it is a sequence, then look inside. */
4167 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4168 insn = as_a <rtx_sequence *> (PATTERN (insn))-> insn (0);
4170 /* If insn is a CALL_INSN, then it must be a call to a volatile
4171 function (otherwise there would be epilogue insns). */
4172 if (insn && CALL_P (insn))
4174 fputs ("\tnop\n", file);
4175 extra_nop = true;
4177 else
4178 extra_nop = false;
4180 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4182 if (TARGET_SOM && TARGET_GAS)
4184 /* We are done with this subspace except possibly for some additional
4185 debug information. Forget that we are in this subspace to ensure
4186 that the next function is output in its own subspace. */
4187 in_section = NULL;
4188 cfun->machine->in_nsubspa = 2;
4191 /* Thunks do their own insn accounting. */
4192 if (cfun->is_thunk)
4193 return;
4195 if (INSN_ADDRESSES_SET_P ())
4197 last_address = extra_nop ? 4 : 0;
4198 insn = get_last_nonnote_insn ();
4199 if (insn)
4201 last_address += INSN_ADDRESSES (INSN_UID (insn));
4202 if (INSN_P (insn))
4203 last_address += insn_default_length (insn);
4205 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4206 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4208 else
4209 last_address = UINT_MAX;
4211 /* Finally, update the total number of code bytes output so far. */
4212 update_total_code_bytes (last_address);
4215 void
4216 pa_expand_epilogue (void)
4218 rtx tmpreg;
4219 HOST_WIDE_INT offset;
4220 HOST_WIDE_INT ret_off = 0;
4221 int i;
4222 int merge_sp_adjust_with_load = 0;
4224 /* We will use this often. */
4225 tmpreg = gen_rtx_REG (word_mode, 1);
4227 /* Try to restore RP early to avoid load/use interlocks when
4228 RP gets used in the return (bv) instruction. This appears to still
4229 be necessary even when we schedule the prologue and epilogue. */
4230 if (rp_saved)
4232 ret_off = TARGET_64BIT ? -16 : -20;
4233 if (frame_pointer_needed)
4235 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4236 ret_off = 0;
4238 else
4240 /* No frame pointer, and stack is smaller than 8k. */
4241 if (VAL_14_BITS_P (ret_off - actual_fsize))
4243 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4244 ret_off = 0;
4249 /* General register restores. */
4250 if (frame_pointer_needed)
4252 offset = local_fsize;
4254 /* If the current function calls __builtin_eh_return, then we need
4255 to restore the saved EH data registers. */
4256 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4258 unsigned int i, regno;
4260 for (i = 0; ; ++i)
4262 regno = EH_RETURN_DATA_REGNO (i);
4263 if (regno == INVALID_REGNUM)
4264 break;
4266 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4267 offset += UNITS_PER_WORD;
4271 for (i = 18; i >= 4; i--)
4272 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4274 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4275 offset += UNITS_PER_WORD;
4278 else
4280 offset = local_fsize - actual_fsize;
4282 /* If the current function calls __builtin_eh_return, then we need
4283 to restore the saved EH data registers. */
4284 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4286 unsigned int i, regno;
4288 for (i = 0; ; ++i)
4290 regno = EH_RETURN_DATA_REGNO (i);
4291 if (regno == INVALID_REGNUM)
4292 break;
4294 /* Only for the first load.
4295 merge_sp_adjust_with_load holds the register load
4296 with which we will merge the sp adjustment. */
4297 if (merge_sp_adjust_with_load == 0
4298 && local_fsize == 0
4299 && VAL_14_BITS_P (-actual_fsize))
4300 merge_sp_adjust_with_load = regno;
4301 else
4302 load_reg (regno, offset, STACK_POINTER_REGNUM);
4303 offset += UNITS_PER_WORD;
4307 for (i = 18; i >= 3; i--)
4309 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4311 /* Only for the first load.
4312 merge_sp_adjust_with_load holds the register load
4313 with which we will merge the sp adjustment. */
4314 if (merge_sp_adjust_with_load == 0
4315 && local_fsize == 0
4316 && VAL_14_BITS_P (-actual_fsize))
4317 merge_sp_adjust_with_load = i;
4318 else
4319 load_reg (i, offset, STACK_POINTER_REGNUM);
4320 offset += UNITS_PER_WORD;
4325 /* Align pointer properly (doubleword boundary). */
4326 offset = (offset + 7) & ~7;
4328 /* FP register restores. */
4329 if (save_fregs)
4331 /* Adjust the register to index off of. */
4332 if (frame_pointer_needed)
4333 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4334 else
4335 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4337 /* Actually do the restores now. */
4338 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4339 if (df_regs_ever_live_p (i)
4340 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4342 rtx src = gen_rtx_MEM (DFmode,
4343 gen_rtx_POST_INC (word_mode, tmpreg));
4344 rtx dest = gen_rtx_REG (DFmode, i);
4345 emit_move_insn (dest, src);
4349 /* Emit a blockage insn here to keep these insns from being moved to
4350 an earlier spot in the epilogue, or into the main instruction stream.
4352 This is necessary as we must not cut the stack back before all the
4353 restores are finished. */
4354 emit_insn (gen_blockage ());
4356 /* Reset stack pointer (and possibly frame pointer). The stack
4357 pointer is initially set to fp + 64 to avoid a race condition. */
4358 if (frame_pointer_needed)
4360 rtx delta = GEN_INT (-64);
4362 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4363 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4364 stack_pointer_rtx, delta));
4366 /* If we were deferring a callee register restore, do it now. */
4367 else if (merge_sp_adjust_with_load)
4369 rtx delta = GEN_INT (-actual_fsize);
4370 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4372 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4374 else if (actual_fsize != 0)
4375 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4376 - actual_fsize, 0);
4378 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4379 frame greater than 8k), do so now. */
4380 if (ret_off != 0)
4381 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4383 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4385 rtx sa = EH_RETURN_STACKADJ_RTX;
4387 emit_insn (gen_blockage ());
4388 emit_insn (TARGET_64BIT
4389 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4390 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4394 bool
4395 pa_can_use_return_insn (void)
4397 if (!reload_completed)
4398 return false;
4400 if (frame_pointer_needed)
4401 return false;
4403 if (df_regs_ever_live_p (2))
4404 return false;
4406 if (crtl->profile)
4407 return false;
4409 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4412 rtx
4413 hppa_pic_save_rtx (void)
4415 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4418 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4419 #define NO_DEFERRED_PROFILE_COUNTERS 0
4420 #endif
4423 /* Vector of funcdef numbers. */
4424 static vec<int> funcdef_nos;
4426 /* Output deferred profile counters. */
4427 static void
4428 output_deferred_profile_counters (void)
4430 unsigned int i;
4431 int align, n;
4433 if (funcdef_nos.is_empty ())
4434 return;
4436 switch_to_section (data_section);
4437 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4438 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4440 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4442 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4443 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4446 funcdef_nos.release ();
4449 void
4450 hppa_profile_hook (int label_no)
4452 /* We use SImode for the address of the function in both 32 and
4453 64-bit code to avoid having to provide DImode versions of the
4454 lcla2 and load_offset_label_address insn patterns. */
4455 rtx reg = gen_reg_rtx (SImode);
4456 rtx_code_label *label_rtx = gen_label_rtx ();
4457 rtx begin_label_rtx, call_insn;
4458 char begin_label_name[16];
4460 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4461 label_no);
4462 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4464 if (TARGET_64BIT)
4465 emit_move_insn (arg_pointer_rtx,
4466 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4467 GEN_INT (64)));
4469 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4471 /* The address of the function is loaded into %r25 with an instruction-
4472 relative sequence that avoids the use of relocations. The sequence
4473 is split so that the load_offset_label_address instruction can
4474 occupy the delay slot of the call to _mcount. */
4475 if (TARGET_PA_20)
4476 emit_insn (gen_lcla2 (reg, label_rtx));
4477 else
4478 emit_insn (gen_lcla1 (reg, label_rtx));
4480 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4481 reg, begin_label_rtx, label_rtx));
4483 #if !NO_DEFERRED_PROFILE_COUNTERS
4485 rtx count_label_rtx, addr, r24;
4486 char count_label_name[16];
4488 funcdef_nos.safe_push (label_no);
4489 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4490 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4492 addr = force_reg (Pmode, count_label_rtx);
4493 r24 = gen_rtx_REG (Pmode, 24);
4494 emit_move_insn (r24, addr);
4496 call_insn =
4497 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4498 gen_rtx_SYMBOL_REF (Pmode,
4499 "_mcount")),
4500 GEN_INT (TARGET_64BIT ? 24 : 12)));
4502 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4504 #else
4506 call_insn =
4507 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4508 gen_rtx_SYMBOL_REF (Pmode,
4509 "_mcount")),
4510 GEN_INT (TARGET_64BIT ? 16 : 8)));
4512 #endif
4514 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4515 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4517 /* Indicate the _mcount call cannot throw, nor will it execute a
4518 non-local goto. */
4519 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4522 /* Fetch the return address for the frame COUNT steps up from
4523 the current frame, after the prologue. FRAMEADDR is the
4524 frame pointer of the COUNT frame.
4526 We want to ignore any export stub remnants here. To handle this,
4527 we examine the code at the return address, and if it is an export
4528 stub, we return a memory rtx for the stub return address stored
4529 at frame-24.
4531 The value returned is used in two different ways:
4533 1. To find a function's caller.
4535 2. To change the return address for a function.
4537 This function handles most instances of case 1; however, it will
4538 fail if there are two levels of stubs to execute on the return
4539 path. The only way I believe that can happen is if the return value
4540 needs a parameter relocation, which never happens for C code.
4542 This function handles most instances of case 2; however, it will
4543 fail if we did not originally have stub code on the return path
4544 but will need stub code on the new return path. This can happen if
4545 the caller & callee are both in the main program, but the new
4546 return location is in a shared library. */
4548 rtx
4549 pa_return_addr_rtx (int count, rtx frameaddr)
4551 rtx label;
4552 rtx rp;
4553 rtx saved_rp;
4554 rtx ins;
4556 /* The instruction stream at the return address of a PA1.X export stub is:
4558 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4559 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4560 0x00011820 | stub+16: mtsp r1,sr0
4561 0xe0400002 | stub+20: be,n 0(sr0,rp)
4563 0xe0400002 must be specified as -532676606 so that it won't be
4564 rejected as an invalid immediate operand on 64-bit hosts.
4566 The instruction stream at the return address of a PA2.0 export stub is:
4568 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4569 0xe840d002 | stub+12: bve,n (rp)
4572 HOST_WIDE_INT insns[4];
4573 int i, len;
4575 if (count != 0)
4576 return NULL_RTX;
4578 rp = get_hard_reg_initial_val (Pmode, 2);
4580 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4581 return rp;
4583 /* If there is no export stub then just use the value saved from
4584 the return pointer register. */
4586 saved_rp = gen_reg_rtx (Pmode);
4587 emit_move_insn (saved_rp, rp);
4589 /* Get pointer to the instruction stream. We have to mask out the
4590 privilege level from the two low order bits of the return address
4591 pointer here so that ins will point to the start of the first
4592 instruction that would have been executed if we returned. */
4593 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4594 label = gen_label_rtx ();
4596 if (TARGET_PA_20)
4598 insns[0] = 0x4bc23fd1;
4599 insns[1] = -398405630;
4600 len = 2;
4602 else
4604 insns[0] = 0x4bc23fd1;
4605 insns[1] = 0x004010a1;
4606 insns[2] = 0x00011820;
4607 insns[3] = -532676606;
4608 len = 4;
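/* The negative constants above are simply the sign-extended forms
   of the instruction words shown in the comment at the top of this
   function: -398405630 is 0xe840d002 (PA2.0 "bve,n (rp)") and
   -532676606 is 0xe0400002 (PA1.X "be,n 0(sr0,rp)").  */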
4611 /* Check the instruction stream at the normal return address for the
4612 export stub. If it is an export stub, then our return address is
4613 really in -24[frameaddr]. */
4615 for (i = 0; i < len; i++)
4617 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4618 rtx op1 = GEN_INT (insns[i]);
4619 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4622 /* Here we know that our return address points to an export
4623 stub. We don't want to return the address of the export stub,
4624 but rather the return address of the export stub. That return
4625 address is stored at -24[frameaddr]. */
4627 emit_move_insn (saved_rp,
4628 gen_rtx_MEM (Pmode,
4629 memory_address (Pmode,
4630 plus_constant (Pmode, frameaddr,
4631 -24))));
4633 emit_label (label);
4635 return saved_rp;
4638 void
4639 pa_emit_bcond_fp (rtx operands[])
4641 enum rtx_code code = GET_CODE (operands[0]);
4642 rtx operand0 = operands[1];
4643 rtx operand1 = operands[2];
4644 rtx label = operands[3];
4646 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4647 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4649 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4650 gen_rtx_IF_THEN_ELSE (VOIDmode,
4651 gen_rtx_fmt_ee (NE,
4652 VOIDmode,
4653 gen_rtx_REG (CCFPmode, 0),
4654 const0_rtx),
4655 gen_rtx_LABEL_REF (VOIDmode, label),
4656 pc_rtx)));
4660 /* Adjust the cost of a scheduling dependency. Return the new cost of
4661 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4663 static int
4664 pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4666 enum attr_type attr_type;
4668 /* Don't adjust costs for a pa8000 chip; also don't adjust any
4669 true dependencies, as they are described with bypasses now. */
4670 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4671 return cost;
4673 if (! recog_memoized (insn))
4674 return 0;
4676 attr_type = get_attr_type (insn);
4678 switch (REG_NOTE_KIND (link))
4680 case REG_DEP_ANTI:
4681 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4682 cycles later. */
4684 if (attr_type == TYPE_FPLOAD)
4686 rtx pat = PATTERN (insn);
4687 rtx dep_pat = PATTERN (dep_insn);
4688 if (GET_CODE (pat) == PARALLEL)
4690 /* This happens for the fldXs,mb patterns. */
4691 pat = XVECEXP (pat, 0, 0);
4693 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4694 /* If this happens, we have to extend this to schedule
4695 optimally. Return 0 for now. */
4696 return 0;
4698 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4700 if (! recog_memoized (dep_insn))
4701 return 0;
4702 switch (get_attr_type (dep_insn))
4704 case TYPE_FPALU:
4705 case TYPE_FPMULSGL:
4706 case TYPE_FPMULDBL:
4707 case TYPE_FPDIVSGL:
4708 case TYPE_FPDIVDBL:
4709 case TYPE_FPSQRTSGL:
4710 case TYPE_FPSQRTDBL:
4711 /* A fpload can't be issued until one cycle before a
4712 preceding arithmetic operation has finished if
4713 the target of the fpload is any of the sources
4714 (or destination) of the arithmetic operation. */
4715 return insn_default_latency (dep_insn) - 1;
4717 default:
4718 return 0;
4722 else if (attr_type == TYPE_FPALU)
4724 rtx pat = PATTERN (insn);
4725 rtx dep_pat = PATTERN (dep_insn);
4726 if (GET_CODE (pat) == PARALLEL)
4728 /* This happens for the fldXs,mb patterns. */
4729 pat = XVECEXP (pat, 0, 0);
4731 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4732 /* If this happens, we have to extend this to schedule
4733 optimally. Return 0 for now. */
4734 return 0;
4736 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4738 if (! recog_memoized (dep_insn))
4739 return 0;
4740 switch (get_attr_type (dep_insn))
4742 case TYPE_FPDIVSGL:
4743 case TYPE_FPDIVDBL:
4744 case TYPE_FPSQRTSGL:
4745 case TYPE_FPSQRTDBL:
4746 /* An ALU flop can't be issued until two cycles before a
4747 preceding divide or sqrt operation has finished if
4748 the target of the ALU flop is any of the sources
4749 (or destination) of the divide or sqrt operation. */
4750 return insn_default_latency (dep_insn) - 2;
4752 default:
4753 return 0;
4758 /* For other anti dependencies, the cost is 0. */
4759 return 0;
4761 case REG_DEP_OUTPUT:
4762 /* Output dependency; DEP_INSN writes a register that INSN writes some
4763 cycles later. */
4764 if (attr_type == TYPE_FPLOAD)
4766 rtx pat = PATTERN (insn);
4767 rtx dep_pat = PATTERN (dep_insn);
4768 if (GET_CODE (pat) == PARALLEL)
4770 /* This happens for the fldXs,mb patterns. */
4771 pat = XVECEXP (pat, 0, 0);
4773 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4774 /* If this happens, we have to extend this to schedule
4775 optimally. Return 0 for now. */
4776 return 0;
4778 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4780 if (! recog_memoized (dep_insn))
4781 return 0;
4782 switch (get_attr_type (dep_insn))
4784 case TYPE_FPALU:
4785 case TYPE_FPMULSGL:
4786 case TYPE_FPMULDBL:
4787 case TYPE_FPDIVSGL:
4788 case TYPE_FPDIVDBL:
4789 case TYPE_FPSQRTSGL:
4790 case TYPE_FPSQRTDBL:
4791 /* A fpload can't be issued until one cycle before a
4792 preceding arithmetic operation has finished if
4793 the target of the fpload is the destination of the
4794 arithmetic operation.
4796 Exception: For PA7100LC, PA7200 and PA7300, the cost
4797 is 3 cycles, unless they bundle together. We also
4798 pay the penalty if the second insn is a fpload. */
4799 return insn_default_latency (dep_insn) - 1;
4801 default:
4802 return 0;
4806 else if (attr_type == TYPE_FPALU)
4808 rtx pat = PATTERN (insn);
4809 rtx dep_pat = PATTERN (dep_insn);
4810 if (GET_CODE (pat) == PARALLEL)
4812 /* This happens for the fldXs,mb patterns. */
4813 pat = XVECEXP (pat, 0, 0);
4815 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4816 /* If this happens, we have to extend this to schedule
4817 optimally. Return 0 for now. */
4818 return 0;
4820 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4822 if (! recog_memoized (dep_insn))
4823 return 0;
4824 switch (get_attr_type (dep_insn))
4826 case TYPE_FPDIVSGL:
4827 case TYPE_FPDIVDBL:
4828 case TYPE_FPSQRTSGL:
4829 case TYPE_FPSQRTDBL:
4830 /* An ALU flop can't be issued until two cycles before a
4831 preceding divide or sqrt operation has finished if
4832 the target of the ALU flop is also the target of
4833 the divide or sqrt operation. */
4834 return insn_default_latency (dep_insn) - 2;
4836 default:
4837 return 0;
4842 /* For other output dependencies, the cost is 0. */
4843 return 0;
4845 default:
4846 gcc_unreachable ();
4850 /* Adjust scheduling priorities. We use this to try and keep addil
4851 and the next use of %r1 close together. */
4852 static int
4853 pa_adjust_priority (rtx_insn *insn, int priority)
4855 rtx set = single_set (insn);
4856 rtx src, dest;
4857 if (set)
4859 src = SET_SRC (set);
4860 dest = SET_DEST (set);
4861 if (GET_CODE (src) == LO_SUM
4862 && symbolic_operand (XEXP (src, 1), VOIDmode)
4863 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4864 priority >>= 3;
4866 else if (GET_CODE (src) == MEM
4867 && GET_CODE (XEXP (src, 0)) == LO_SUM
4868 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4869 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4870 priority >>= 1;
4872 else if (GET_CODE (dest) == MEM
4873 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4874 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4875 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4876 priority >>= 3;
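/* Illustration: with an initial priority of 32, a store through a
   symbolic LO_SUM address drops to 4 (>> 3) while a load through one
   drops only to 16 (>> 1); references to read-only operands keep
   their original priority.  */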
4878 return priority;
4881 /* The 700 can only issue a single insn at a time.
4882 The 7XXX processors can issue two insns at a time.
4883 The 8000 can issue 4 insns at a time. */
4884 static int
4885 pa_issue_rate (void)
4887 switch (pa_cpu)
4889 case PROCESSOR_700: return 1;
4890 case PROCESSOR_7100: return 2;
4891 case PROCESSOR_7100LC: return 2;
4892 case PROCESSOR_7200: return 2;
4893 case PROCESSOR_7300: return 2;
4894 case PROCESSOR_8000: return 4;
4896 default:
4897 gcc_unreachable ();
4903 /* Return any length plus adjustment needed by INSN which already has
4904 its length computed as LENGTH. Return LENGTH if no adjustment is
4905 necessary.
4907 Also compute the length of an inline block move here as it is too
4908 complicated to express as a length attribute in pa.md. */
4909 int
4910 pa_adjust_insn_length (rtx_insn *insn, int length)
4912 rtx pat = PATTERN (insn);
4914 /* If length is negative or undefined, provide initial length. */
4915 if ((unsigned int) length >= INT_MAX)
4917 if (GET_CODE (pat) == SEQUENCE)
4918 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4920 switch (get_attr_type (insn))
4922 case TYPE_MILLI:
4923 length = pa_attr_length_millicode_call (insn);
4924 break;
4925 case TYPE_CALL:
4926 length = pa_attr_length_call (insn, 0);
4927 break;
4928 case TYPE_SIBCALL:
4929 length = pa_attr_length_call (insn, 1);
4930 break;
4931 case TYPE_DYNCALL:
4932 length = pa_attr_length_indirect_call (insn);
4933 break;
4934 case TYPE_SH_FUNC_ADRS:
4935 length = pa_attr_length_millicode_call (insn) + 20;
4936 break;
4937 default:
4938 gcc_unreachable ();
4942 /* Block move pattern. */
4943 if (NONJUMP_INSN_P (insn)
4944 && GET_CODE (pat) == PARALLEL
4945 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4946 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4947 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4948 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4949 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4950 length += compute_movmem_length (insn) - 4;
4951 /* Block clear pattern. */
4952 else if (NONJUMP_INSN_P (insn)
4953 && GET_CODE (pat) == PARALLEL
4954 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4955 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4956 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4957 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4958 length += compute_clrmem_length (insn) - 4;
4959 /* Conditional branch with an unfilled delay slot. */
4960 else if (JUMP_P (insn) && ! simplejump_p (insn))
4962 /* Adjust a short backwards conditional with an unfilled delay slot. */
4963 if (GET_CODE (pat) == SET
4964 && length == 4
4965 && JUMP_LABEL (insn) != NULL_RTX
4966 && ! forward_branch_p (insn))
4967 length += 4;
4968 else if (GET_CODE (pat) == PARALLEL
4969 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4970 && length == 4)
4971 length += 4;
4972 /* Adjust dbra insn with short backwards conditional branch with
4973 unfilled delay slot -- only for case where counter is in a
4974 general register. */
4975 else if (GET_CODE (pat) == PARALLEL
4976 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4977 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4978 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4979 && length == 4
4980 && ! forward_branch_p (insn))
4981 length += 4;
4983 return length;
4986 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4988 static bool
4989 pa_print_operand_punct_valid_p (unsigned char code)
4991 if (code == '@'
4992 || code == '#'
4993 || code == '*'
4994 || code == '^')
4995 return true;
4997 return false;
5000 /* Print operand X (an rtx) in assembler syntax to file FILE.
5001 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5002 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5004 void
5005 pa_print_operand (FILE *file, rtx x, int code)
5007 switch (code)
5009 case '#':
5010 /* Output a 'nop' if there's nothing for the delay slot. */
5011 if (dbr_sequence_length () == 0)
5012 fputs ("\n\tnop", file);
5013 return;
5014 case '*':
5015 /* Output a nullification completer if there's nothing for the
5016 delay slot or nullification is requested. */
5017 if (dbr_sequence_length () == 0
5018 || (final_sequence
5019 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5020 fputs (",n", file);
5021 return;
5022 case 'R':
5023 /* Print out the second register name of a register pair.
5024 I.e., R (6) => 7. */
5025 fputs (reg_names[REGNO (x) + 1], file);
5026 return;
5027 case 'r':
5028 /* A register or zero. */
5029 if (x == const0_rtx
5030 || (x == CONST0_RTX (DFmode))
5031 || (x == CONST0_RTX (SFmode)))
5033 fputs ("%r0", file);
5034 return;
5036 else
5037 break;
5038 case 'f':
5039 /* A register or zero (floating point). */
5040 if (x == const0_rtx
5041 || (x == CONST0_RTX (DFmode))
5042 || (x == CONST0_RTX (SFmode)))
5044 fputs ("%fr0", file);
5045 return;
5047 else
5048 break;
5049 case 'A':
5051 rtx xoperands[2];
5053 xoperands[0] = XEXP (XEXP (x, 0), 0);
5054 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5055 pa_output_global_address (file, xoperands[1], 0);
5056 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5057 return;
5060 case 'C': /* Plain (C)ondition */
5061 case 'X':
5062 switch (GET_CODE (x))
5064 case EQ:
5065 fputs ("=", file); break;
5066 case NE:
5067 fputs ("<>", file); break;
5068 case GT:
5069 fputs (">", file); break;
5070 case GE:
5071 fputs (">=", file); break;
5072 case GEU:
5073 fputs (">>=", file); break;
5074 case GTU:
5075 fputs (">>", file); break;
5076 case LT:
5077 fputs ("<", file); break;
5078 case LE:
5079 fputs ("<=", file); break;
5080 case LEU:
5081 fputs ("<<=", file); break;
5082 case LTU:
5083 fputs ("<<", file); break;
5084 default:
5085 gcc_unreachable ();
5087 return;
5088 case 'N': /* Condition, (N)egated */
5089 switch (GET_CODE (x))
5091 case EQ:
5092 fputs ("<>", file); break;
5093 case NE:
5094 fputs ("=", file); break;
5095 case GT:
5096 fputs ("<=", file); break;
5097 case GE:
5098 fputs ("<", file); break;
5099 case GEU:
5100 fputs ("<<", file); break;
5101 case GTU:
5102 fputs ("<<=", file); break;
5103 case LT:
5104 fputs (">=", file); break;
5105 case LE:
5106 fputs (">", file); break;
5107 case LEU:
5108 fputs (">>", file); break;
5109 case LTU:
5110 fputs (">>=", file); break;
5111 default:
5112 gcc_unreachable ();
5114 return;
5115 /* For floating point comparisons. Note that the output
5116 predicates are the complement of the desired mode. The
5117 conditions for GT, GE, LT, LE and LTGT cause an invalid
5118 operation exception if the result is unordered and this
5119 exception is enabled in the floating-point status register. */
5120 case 'Y':
5121 switch (GET_CODE (x))
5123 case EQ:
5124 fputs ("!=", file); break;
5125 case NE:
5126 fputs ("=", file); break;
5127 case GT:
5128 fputs ("!>", file); break;
5129 case GE:
5130 fputs ("!>=", file); break;
5131 case LT:
5132 fputs ("!<", file); break;
5133 case LE:
5134 fputs ("!<=", file); break;
5135 case LTGT:
5136 fputs ("!<>", file); break;
5137 case UNLE:
5138 fputs ("!?<=", file); break;
5139 case UNLT:
5140 fputs ("!?<", file); break;
5141 case UNGE:
5142 fputs ("!?>=", file); break;
5143 case UNGT:
5144 fputs ("!?>", file); break;
5145 case UNEQ:
5146 fputs ("!?=", file); break;
5147 case UNORDERED:
5148 fputs ("!?", file); break;
5149 case ORDERED:
5150 fputs ("?", file); break;
5151 default:
5152 gcc_unreachable ();
5154 return;
5155 case 'S': /* Condition, operands are (S)wapped. */
5156 switch (GET_CODE (x))
5158 case EQ:
5159 fputs ("=", file); break;
5160 case NE:
5161 fputs ("<>", file); break;
5162 case GT:
5163 fputs ("<", file); break;
5164 case GE:
5165 fputs ("<=", file); break;
5166 case GEU:
5167 fputs ("<<=", file); break;
5168 case GTU:
5169 fputs ("<<", file); break;
5170 case LT:
5171 fputs (">", file); break;
5172 case LE:
5173 fputs (">=", file); break;
5174 case LEU:
5175 fputs (">>=", file); break;
5176 case LTU:
5177 fputs (">>", file); break;
5178 default:
5179 gcc_unreachable ();
5181 return;
5182 case 'B': /* Condition, (B)oth swapped and negate. */
5183 switch (GET_CODE (x))
5185 case EQ:
5186 fputs ("<>", file); break;
5187 case NE:
5188 fputs ("=", file); break;
5189 case GT:
5190 fputs (">=", file); break;
5191 case GE:
5192 fputs (">", file); break;
5193 case GEU:
5194 fputs (">>", file); break;
5195 case GTU:
5196 fputs (">>=", file); break;
5197 case LT:
5198 fputs ("<=", file); break;
5199 case LE:
5200 fputs ("<", file); break;
5201 case LEU:
5202 fputs ("<<", file); break;
5203 case LTU:
5204 fputs ("<<=", file); break;
5205 default:
5206 gcc_unreachable ();
5208 return;
5209 case 'k':
5210 gcc_assert (GET_CODE (x) == CONST_INT);
5211 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5212 return;
5213 case 'Q':
5214 gcc_assert (GET_CODE (x) == CONST_INT);
5215 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5216 return;
5217 case 'L':
5218 gcc_assert (GET_CODE (x) == CONST_INT);
5219 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5220 return;
5221 case 'O':
5222 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5223 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5224 return;
5225 case 'p':
5226 gcc_assert (GET_CODE (x) == CONST_INT);
5227 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5228 return;
5229 case 'P':
5230 gcc_assert (GET_CODE (x) == CONST_INT);
5231 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5232 return;
5233 case 'I':
5234 if (GET_CODE (x) == CONST_INT)
5235 fputs ("i", file);
5236 return;
5237 case 'M':
5238 case 'F':
5239 switch (GET_CODE (XEXP (x, 0)))
5241 case PRE_DEC:
5242 case PRE_INC:
5243 if (ASSEMBLER_DIALECT == 0)
5244 fputs ("s,mb", file);
5245 else
5246 fputs (",mb", file);
5247 break;
5248 case POST_DEC:
5249 case POST_INC:
5250 if (ASSEMBLER_DIALECT == 0)
5251 fputs ("s,ma", file);
5252 else
5253 fputs (",ma", file);
5254 break;
5255 case PLUS:
5256 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5257 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5259 if (ASSEMBLER_DIALECT == 0)
5260 fputs ("x", file);
5262 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5263 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5265 if (ASSEMBLER_DIALECT == 0)
5266 fputs ("x,s", file);
5267 else
5268 fputs (",s", file);
5270 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5271 fputs ("s", file);
5272 break;
5273 default:
5274 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5275 fputs ("s", file);
5276 break;
5278 return;
5279 case 'G':
5280 pa_output_global_address (file, x, 0);
5281 return;
5282 case 'H':
5283 pa_output_global_address (file, x, 1);
5284 return;
5286 case 0: /* Don't do anything special. */
5286 break;
5287 case 'Z':
5289 unsigned op[3];
5290 compute_zdepwi_operands (INTVAL (x), op);
5291 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5292 return;
5294 case 'z':
5296 unsigned op[3];
5297 compute_zdepdi_operands (INTVAL (x), op);
5298 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5299 return;
5301 case 'c':
5302 /* We can get here from a .vtable_inherit due to our
5303 CONSTANT_ADDRESS_P rejecting perfectly good constant
5304 addresses. */
5305 break;
5306 default:
5307 gcc_unreachable ();
5309 if (GET_CODE (x) == REG)
5311 fputs (reg_names [REGNO (x)], file);
5312 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5314 fputs ("R", file);
5315 return;
5317 if (FP_REG_P (x)
5318 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5319 && (REGNO (x) & 1) == 0)
5320 fputs ("L", file);
5322 else if (GET_CODE (x) == MEM)
5324 int size = GET_MODE_SIZE (GET_MODE (x));
5325 rtx base = NULL_RTX;
5326 switch (GET_CODE (XEXP (x, 0)))
5328 case PRE_DEC:
5329 case POST_DEC:
5330 base = XEXP (XEXP (x, 0), 0);
5331 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5332 break;
5333 case PRE_INC:
5334 case POST_INC:
5335 base = XEXP (XEXP (x, 0), 0);
5336 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5337 break;
5338 case PLUS:
5339 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5340 fprintf (file, "%s(%s)",
5341 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5342 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5343 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5344 fprintf (file, "%s(%s)",
5345 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5346 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5347 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5348 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5350 /* Because the REG_POINTER flag can get lost during reload,
5351 pa_legitimate_address_p canonicalizes the order of the
5352 index and base registers in the combined move patterns. */
5353 rtx base = XEXP (XEXP (x, 0), 1);
5354 rtx index = XEXP (XEXP (x, 0), 0);
5356 fprintf (file, "%s(%s)",
5357 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5359 else
5360 output_address (XEXP (x, 0));
5361 break;
5362 default:
5363 output_address (XEXP (x, 0));
5364 break;
5367 else
5368 output_addr_const (file, x);
5371 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5373 void
5374 pa_output_global_address (FILE *file, rtx x, int round_constant)
5377 /* Imagine (high (const (plus ...))). */
5378 if (GET_CODE (x) == HIGH)
5379 x = XEXP (x, 0);
5381 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5382 output_addr_const (file, x);
5383 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5385 output_addr_const (file, x);
5386 fputs ("-$global$", file);
5388 else if (GET_CODE (x) == CONST)
5390 const char *sep = "";
5391 int offset = 0; /* Assembler wants -$global$ at end. */
5392 rtx base = NULL_RTX;
5394 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5396 case SYMBOL_REF:
5397 base = XEXP (XEXP (x, 0), 0);
5398 output_addr_const (file, base);
5399 break;
5400 case CONST_INT:
5401 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5402 break;
5403 default:
5404 gcc_unreachable ();
5407 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5409 case SYMBOL_REF:
5410 base = XEXP (XEXP (x, 0), 1);
5411 output_addr_const (file, base);
5412 break;
5413 case CONST_INT:
5414 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5415 break;
5416 default:
5417 gcc_unreachable ();
5420 /* How bogus. The compiler is apparently responsible for
5421 rounding the constant if it uses an LR field selector.
5423 The linker and/or assembler seem a better place since
5424 they have to do this kind of thing already.
5426 If we fail to do this, HP's optimizing linker may eliminate
5427 an addil, but not update the ldw/stw/ldo instruction that
5428 uses the result of the addil. */
5429 if (round_constant)
5430 offset = ((offset + 0x1000) & ~0x1fff);
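/* Rounding example: an offset of 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff = 0x2000, i.e. the constant is
   rounded to the nearest 8k multiple.  */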
5432 switch (GET_CODE (XEXP (x, 0)))
5434 case PLUS:
5435 if (offset < 0)
5437 offset = -offset;
5438 sep = "-";
5440 else
5441 sep = "+";
5442 break;
5444 case MINUS:
5445 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5446 sep = "-";
5447 break;
5449 default:
5450 gcc_unreachable ();
5453 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5454 fputs ("-$global$", file);
5455 if (offset)
5456 fprintf (file, "%s%d", sep, offset);
5458 else
5459 output_addr_const (file, x);
5462 /* Output boilerplate text to appear at the beginning of the file.
5463 There are several possible versions. */
5464 #define aputs(x) fputs(x, asm_out_file)
5465 static inline void
5466 pa_file_start_level (void)
5468 if (TARGET_64BIT)
5469 aputs ("\t.LEVEL 2.0w\n");
5470 else if (TARGET_PA_20)
5471 aputs ("\t.LEVEL 2.0\n");
5472 else if (TARGET_PA_11)
5473 aputs ("\t.LEVEL 1.1\n");
5474 else
5475 aputs ("\t.LEVEL 1.0\n");
5478 static inline void
5479 pa_file_start_space (int sortspace)
5481 aputs ("\t.SPACE $PRIVATE$");
5482 if (sortspace)
5483 aputs (",SORT=16");
5484 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5485 if (flag_tm)
5486 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5487 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5488 "\n\t.SPACE $TEXT$");
5489 if (sortspace)
5490 aputs (",SORT=8");
5491 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5492 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5495 static inline void
5496 pa_file_start_file (int want_version)
5498 if (write_symbols != NO_DEBUG)
5500 output_file_directive (asm_out_file, main_input_filename);
5501 if (want_version)
5502 aputs ("\t.version\t\"01.01\"\n");
5506 static inline void
5507 pa_file_start_mcount (const char *aswhat)
5509 if (profile_flag)
5510 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5513 static void
5514 pa_elf_file_start (void)
5516 pa_file_start_level ();
5517 pa_file_start_mcount ("ENTRY");
5518 pa_file_start_file (0);
5521 static void
5522 pa_som_file_start (void)
5524 pa_file_start_level ();
5525 pa_file_start_space (0);
5526 aputs ("\t.IMPORT $global$,DATA\n"
5527 "\t.IMPORT $$dyncall,MILLICODE\n");
5528 pa_file_start_mcount ("CODE");
5529 pa_file_start_file (0);
5532 static void
5533 pa_linux_file_start (void)
5535 pa_file_start_file (1);
5536 pa_file_start_level ();
5537 pa_file_start_mcount ("CODE");
5540 static void
5541 pa_hpux64_gas_file_start (void)
5543 pa_file_start_level ();
5544 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5545 if (profile_flag)
5546 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5547 #endif
5548 pa_file_start_file (1);
5551 static void
5552 pa_hpux64_hpas_file_start (void)
5554 pa_file_start_level ();
5555 pa_file_start_space (1);
5556 pa_file_start_mcount ("CODE");
5557 pa_file_start_file (0);
5559 #undef aputs
5561 /* Search the deferred plabel list for SYMBOL and return its internal
5562 label. If an entry for SYMBOL is not found, a new entry is created. */
5564 rtx
5565 pa_get_deferred_plabel (rtx symbol)
5567 const char *fname = XSTR (symbol, 0);
5568 size_t i;
5570 /* See if we have already put this function on the list of deferred
5571 plabels. This list is generally small, so a linear search is not
5572 too ugly. If it proves too slow, replace it with something faster. */
5573 for (i = 0; i < n_deferred_plabels; i++)
5574 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5575 break;
5577 /* If the deferred plabel list is empty, or this entry was not found
5578 on the list, create a new entry on the list. */
5579 if (deferred_plabels == NULL || i == n_deferred_plabels)
5581 tree id;
5583 if (deferred_plabels == 0)
5584 deferred_plabels = ggc_alloc<deferred_plabel> ();
5585 else
5586 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5587 deferred_plabels,
5588 n_deferred_plabels + 1);
5590 i = n_deferred_plabels++;
5591 deferred_plabels[i].internal_label = gen_label_rtx ();
5592 deferred_plabels[i].symbol = symbol;
5594 /* Gross. We have just implicitly taken the address of this
5595 function. Mark it in the same manner as assemble_name. */
5596 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5597 if (id)
5598 mark_referenced (id);
5601 return deferred_plabels[i].internal_label;
5604 static void
5605 output_deferred_plabels (void)
5607 size_t i;
5609 /* If we have some deferred plabels, then we need to switch into the
5610 data or readonly data section, and align it to a 4 byte boundary
5611 before outputting the deferred plabels. */
5612 if (n_deferred_plabels)
5614 switch_to_section (flag_pic ? data_section : readonly_data_section);
5615 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5618 /* Now output the deferred plabels. */
5619 for (i = 0; i < n_deferred_plabels; i++)
5621 targetm.asm_out.internal_label (asm_out_file, "L",
5622 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5623 assemble_integer (deferred_plabels[i].symbol,
5624 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5628 /* Initialize optabs to point to emulation routines. */
5630 static void
5631 pa_init_libfuncs (void)
5633 if (HPUX_LONG_DOUBLE_LIBRARY)
5635 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5636 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5637 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5638 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5639 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5640 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5641 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5642 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5643 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5645 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5646 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5647 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5648 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5649 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5650 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5651 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5653 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5654 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5655 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5656 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5658 set_conv_libfunc (sfix_optab, SImode, TFmode,
5659 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5660 : "_U_Qfcnvfxt_quad_to_sgl");
5661 set_conv_libfunc (sfix_optab, DImode, TFmode,
5662 "_U_Qfcnvfxt_quad_to_dbl");
5663 set_conv_libfunc (ufix_optab, SImode, TFmode,
5664 "_U_Qfcnvfxt_quad_to_usgl");
5665 set_conv_libfunc (ufix_optab, DImode, TFmode,
5666 "_U_Qfcnvfxt_quad_to_udbl");
5668 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5669 "_U_Qfcnvxf_sgl_to_quad");
5670 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5671 "_U_Qfcnvxf_dbl_to_quad");
5672 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5673 "_U_Qfcnvxf_usgl_to_quad");
5674 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5675 "_U_Qfcnvxf_udbl_to_quad");
5678 if (TARGET_SYNC_LIBCALL)
5679 init_sync_libfuncs (UNITS_PER_WORD);
5682 /* HP's millicode routines mean something special to the assembler.
5683 Keep track of which ones we have used. */
5685 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5686 static void import_milli (enum millicodes);
5687 static char imported[(int) end1000];
5688 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5689 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5690 #define MILLI_START 10
5692 static void
5693 import_milli (enum millicodes code)
5695 char str[sizeof (import_string)];
5697 if (!imported[(int) code])
5699 imported[(int) code] = 1;
5700 strcpy (str, import_string);
5701 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5702 output_asm_insn (str, 0);
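/* Worked example: import_milli (mulI) copies "mulI" over the four
   dots at offset MILLI_START (10) in the template, emitting
   ".IMPORT $$mulI,MILLICODE" the first time the routine is used in
   this output file.  */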
5706 /* The register constraints have put the operands and return value in
5707 the proper registers. */
5709 const char *
5710 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5712 import_milli (mulI);
5713 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5716 /* Emit the rtl for doing a division by a constant. */
5718 /* Do magic division millicodes exist for this value? */
5719 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
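/* Per the table above, magic millicode routines exist for divisors
   3, 5, 6, 7, 9, 10, 12, 14 and 15; dividing by 7, say, becomes a
   call to $$divI_7 (or $$divU_7) rather than the generic $$divI.  */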
5721 /* We'll use an array to keep track of the magic millicodes and
5722 whether or not we've used them already. [n][0] is signed, [n][1] is
5723 unsigned. */
5725 static int div_milli[16][2];
5727 int
5728 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5730 if (GET_CODE (operands[2]) == CONST_INT
5731 && INTVAL (operands[2]) > 0
5732 && INTVAL (operands[2]) < 16
5733 && pa_magic_milli[INTVAL (operands[2])])
5735 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5737 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5738 emit
5739 (gen_rtx_PARALLEL
5740 (VOIDmode,
5741 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5742 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5743 SImode,
5744 gen_rtx_REG (SImode, 26),
5745 operands[2])),
5746 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5747 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5748 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5749 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5750 gen_rtx_CLOBBER (VOIDmode, ret))));
5751 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5752 return 1;
5754 return 0;
const char *
pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
{
  int divisor;

  /* If the divisor is a constant, try to use one of the special
     opcodes.  */
  if (GET_CODE (operands[0]) == CONST_INT)
    {
      static char buf[100];
      divisor = INTVAL (operands[0]);
      if (!div_milli[divisor][unsignedp])
	{
	  div_milli[divisor][unsignedp] = 1;
	  if (unsignedp)
	    output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
	  else
	    output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
	}
      if (unsignedp)
	{
	  sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
      else
	{
	  sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
    }
  /* Divisor isn't a special constant.  */
  else
    {
      if (unsignedp)
	{
	  import_milli (divU);
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, "$$divU"));
	}
      else
	{
	  import_milli (divI);
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, "$$divI"));
	}
    }
}
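/* Worked example (illustrative): for an unsigned division by 7, the
   first such division in the file emits

       .IMPORT $$divU_7,MILLICODE

   and every occurrence then calls the $$divU_7 millicode routine.  */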
/* Output a $$rem millicode to do mod.  */

const char *
pa_output_mod_insn (int unsignedp, rtx_insn *insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}
void
pa_output_arg_descriptor (rtx call_insn)
{
  const char *arg_regs[4];
  machine_mode arg_mode;
  rtx link;
  int i, output_flag = 0;
  int regno;

  /* We neither need nor want argument location descriptors for the
     64-bit runtime environment or the ELF32 environment.  */
  if (TARGET_64BIT || TARGET_ELF32)
    return;

  for (i = 0; i < 4; i++)
    arg_regs[i] = 0;

  /* Specify explicitly that no argument relocations should take place
     if using the portable runtime calling conventions.  */
  if (TARGET_PORTABLE_RUNTIME)
    {
      fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
	     asm_out_file);
      return;
    }

  gcc_assert (CALL_P (call_insn));
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
       link; link = XEXP (link, 1))
    {
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));
      if (regno >= 23 && regno <= 26)
	{
	  arg_regs[26 - regno] = "GR";
	  if (arg_mode == DImode)
	    arg_regs[25 - regno] = "GR";
	}
      else if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    arg_regs[(regno - 32) / 2] = "FR";
	  else
	    {
#ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
	      arg_regs[(regno - 34) / 2] = "FR";
	      arg_regs[(regno - 34) / 2 + 1] = "FU";
#else
	      arg_regs[(regno - 34) / 2] = "FU";
	      arg_regs[(regno - 34) / 2 + 1] = "FR";
#endif
	    }
	}
    }
  fputs ("\t.CALL ", asm_out_file);
  for (i = 0; i < 4; i++)
    {
      if (arg_regs[i])
	{
	  if (output_flag++)
	    fputc (',', asm_out_file);
	  fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
	}
    }
  fputc ('\n', asm_out_file);
}
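/* Illustrative example: for a call passing two word-sized integers in
   %r26 and %r25, the loop above fills in ARGW0 and ARGW1 and we emit

       .CALL ARGW0=GR,ARGW1=GR
*/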
/* Inform reload about cases where moving X with a mode MODE to or from
   a register in RCLASS requires an extra scratch or immediate register.
   Return the class needed for the immediate register.  */

static reg_class_t
pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
		     machine_mode mode, secondary_reload_info *sri)
{
  int regno;
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Handle the easy stuff first.  */
  if (rclass == R1_REGS)
    return NO_REGS;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
	return NO_REGS;
    }
  else
    regno = -1;

  /* If we have something like (mem (mem (...)), we can safely assume the
     inner MEM will end up in a general register after reloading, so there's
     no need for a secondary reload.  */
  if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
    return NO_REGS;

  /* Trying to load a constant into a FP register during PIC code
     generation requires %r1 as a scratch register.  For float modes,
     the only legitimate constant is CONST0_RTX.  However, there are
     a few patterns that accept constant double operands.  */
  if (flag_pic
      && FP_REG_CLASS_P (rclass)
      && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      switch (mode)
	{
	case SImode:
	  sri->icode = CODE_FOR_reload_insi_r1;
	  break;

	case DImode:
	  sri->icode = CODE_FOR_reload_indi_r1;
	  break;

	case SFmode:
	  sri->icode = CODE_FOR_reload_insf_r1;
	  break;

	case DFmode:
	  sri->icode = CODE_FOR_reload_indf_r1;
	  break;

	default:
	  gcc_unreachable ();
	}
      return NO_REGS;
    }

  /* Secondary reloads of symbolic expressions require %r1 as a scratch
     register when we're generating PIC code or when the operand isn't
     readonly.  */
  if (pa_symbolic_expression_p (x))
    {
      if (GET_CODE (x) == HIGH)
	x = XEXP (x, 0);

      if (flag_pic || !read_only_operand (x, VOIDmode))
	{
	  switch (mode)
	    {
	    case SImode:
	      sri->icode = CODE_FOR_reload_insi_r1;
	      break;

	    case DImode:
	      sri->icode = CODE_FOR_reload_indi_r1;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  return NO_REGS;
	}
    }

  /* Profiling showed the PA port spends about 1.3% of its compilation
     time in true_regnum from calls inside pa_secondary_reload_class.  */
  if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* Handle reloads for floating point loads and stores.  */
  if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
      && FP_REG_CLASS_P (rclass))
    {
      if (MEM_P (x))
	{
	  x = XEXP (x, 0);

	  /* We don't need an intermediate for indexed and LO_SUM DLT
	     memory addresses.  When INT14_OK_STRICT is true, it might
	     appear that we could directly allow register indirect
	     memory addresses.  However, this doesn't work because we
	     don't support SUBREGs in floating-point register copies
	     and reload doesn't tell us when it's going to use a SUBREG.  */
	  if (IS_INDEX_ADDR_P (x)
	      || IS_LO_SUM_DLT_ADDR_P (x))
	    return NO_REGS;

	  /* Request intermediate general register.  */
	  return GENERAL_REGS;
	}

      /* Request a secondary reload with a general scratch register
	 for everything else.  ??? Could symbolic operands be handled
	 directly when generating non-pic PA 2.0 code?  */
      sri->icode = (in_p
		    ? direct_optab_handler (reload_in_optab, mode)
		    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* A SAR<->FP register copy requires an intermediate general register
     and secondary memory.  We need a secondary reload with a general
     scratch register for spills.  */
  if (rclass == SHIFT_REGS)
    {
      /* Handle spill.  */
      if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
	{
	  sri->icode = (in_p
			? direct_optab_handler (reload_in_optab, mode)
			: direct_optab_handler (reload_out_optab, mode));
	  return NO_REGS;
	}

      /* Handle FP copy.  */
      if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
	return GENERAL_REGS;
    }

  if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (regno) == SHIFT_REGS
      && FP_REG_CLASS_P (rclass))
    return GENERAL_REGS;

  return NO_REGS;
}
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  The argument pointer
   is only marked as live on entry by df-scan when it is a fixed
   register.  It isn't a fixed register in the 64-bit runtime,
   so we need to mark it here.  */

static void
pa_extra_live_on_entry (bitmap regs)
{
  if (TARGET_64BIT)
    bitmap_set_bit (regs, ARG_POINTER_REGNUM);
}
/* Implement EH_RETURN_HANDLER_RTX.  The MEM needs to be volatile
   to prevent it from being deleted.  */

rtx
pa_eh_return_handler_rtx (void)
{
  rtx tmp;

  tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
		      TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
  tmp = gen_rtx_MEM (word_mode, tmp);
  tmp->volatil = 1;
  return tmp;
}
/* In the 32-bit runtime, arguments larger than eight bytes are passed
   by invisible reference.  As a GCC extension, we also pass anything
   with a zero or variable size by reference.

   The 64-bit runtime does not describe passing any types by invisible
   reference.  The internals of GCC can't currently handle passing
   empty structures, and zero or variable length arrays when they are
   not passed entirely on the stack or by reference.  Thus, as a GCC
   extension, we pass these types by reference.  The HP compiler doesn't
   support these types, so hopefully there shouldn't be any compatibility
   issues.  This may have to be revisited when HP releases a C99 compiler
   or updates the ABI.  */

static bool
pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
		      machine_mode mode, const_tree type,
		      bool named ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (TARGET_64BIT)
    return size <= 0;
  else
    return size <= 0 || size > 8;
}
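/* Illustrative examples under these rules: a 12-byte struct is passed
   by invisible reference in the 32-bit runtime (size > 8) but by value
   in the 64-bit runtime; a zero-sized struct is passed by reference in
   both runtimes.  */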
enum direction
pa_function_arg_padding (machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
	  && type
	  && (AGGREGATE_TYPE_P (type)
	      || TREE_CODE (type) == COMPLEX_TYPE
	      || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return none if justification is not required.  */
      if (type
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
	return none;

      /* The directions set here are ignored when a BLKmode argument larger
	 than a word is placed in a register.  Different code is used for
	 the stack and registers.  This makes it difficult to have a
	 consistent data representation for both the stack and registers.
	 For both runtimes, the justification and padding for arguments on
	 the stack and in registers should be identical.  */
      if (TARGET_64BIT)
	/* The 64-bit runtime specifies left justification for aggregates.  */
	return upward;
      else
	/* The 32-bit runtime architecture specifies right justification.
	   When the argument is passed on the stack, the argument is padded
	   with garbage on the left.  The HP compiler pads with zeros.  */
	return downward;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return downward;
  else
    return none;
}
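/* Illustrative example: a 3-byte BLKmode struct (24 bits, not a
   multiple of PARM_BOUNDARY) is right justified (downward) in the
   32-bit runtime and left justified (upward) in the 64-bit runtime.  */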
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
	offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
      else
	offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
	 from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
	emit_move_insn (gen_rtx_MEM (word_mode,
				     plus_constant (Pmode,
						    arg_pointer_rtx, off)),
			gen_rtx_REG (word_mode, i));

      /* The incoming args pointer points just beyond the flushback area;
	 normally this is not a serious concern.  However, when we are doing
	 varargs/stdargs we want to make the arg pointer point to the start
	 of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
		      plus_constant (Pmode, arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
					virtual_incoming_args_rtx,
					offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
		      plus_constant (Pmode, crtl->args.internal_arg_pointer,
				     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);

  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			   gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
      if (indirect)
	{
	  type = ptr;
	  ptr = build_pointer_type (type);
	}
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = fold_build_pointer_plus (valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
      if (ofs != 0)
	t = fold_build_pointer_plus_hwi (t, ofs);

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
	t = build_va_arg_indirect_ref (t);

      return t;
    }
}
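/* Worked example (illustrative) for the 32-bit case above: fetching a
   2-byte short with the arg pointer at address A first computes
   A' = (A - 2) & -4, stores A' back into the va_list, and then reads
   the value at A' + 2 (ofs = (8 - 2) % 4), so the argument is right
   justified within its 4-byte slot.  */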
/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (machine_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches into the delay slot.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx_insn *insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (jump_insn == insn)
	return TRUE;

      /* We can't rely on the length of asms.  So, we return FALSE when
	 the branch is followed by an asm.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || extract_asm_operands (PATTERN (insn)) != NULL_RTX
	  || get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an asm.  Disaster can occur if the asm is empty and the jump
   branches into the delay slot.  So, we add a nop in the delay slot
   when this occurs.  */

static bool
branch_needs_nop_p (rtx_insn *insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (!insn || jump_insn == insn)
	return TRUE;

      if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
	    || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
	  && get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, can use nullification
   to skip the following instruction.  This avoids an extra cycle due
   to a mis-predicted branch when we fall through.  */

static bool
use_skip_p (rtx_insn *insn)
{
  rtx jump_insn = next_active_insn (JUMP_LABEL (insn));

  while (insn)
    {
      insn = next_active_insn (insn);

      /* We can't rely on the length of asms, so we can't skip asms.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
	break;
      if (get_attr_length (insn) == 4
	  && jump_insn == next_active_insn (insn))
	return TRUE;
      if (get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
    /* All short conditional branches except backwards with an unfilled
       delay slot.  */
    case 4:
      if (useskip)
	strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
      else
	strcpy (buf, "{com%I2b,|cmp%I2b,}");
      if (GET_MODE (operands[1]) == DImode)
	strcat (buf, "*");
      if (negated)
	strcat (buf, "%B3");
      else
	strcat (buf, "%S3");
      if (useskip)
	strcat (buf, " %2,%r1,%%r0");
      else if (nullify)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, ",n %2,%r1,%0%#");
	  else
	    strcat (buf, ",n %2,%r1,%0");
	}
      else
	strcat (buf, " %2,%r1,%0");
      break;

    /* All long conditionals.  Note a short backward branch with an
       unfilled delay slot is treated just like a long backward branch
       with an unfilled delay slot.  */
    case 8:
      /* Handle weird backwards branch with a filled delay slot
	 which is nullified.  */
      if (dbr_sequence_length () != 0
	  && ! forward_branch_p (insn)
	  && nullify)
	{
	  strcpy (buf, "{com%I2b,|cmp%I2b,}");
	  if (GET_MODE (operands[1]) == DImode)
	    strcat (buf, "*");
	  if (negated)
	    strcat (buf, "%S3");
	  else
	    strcat (buf, "%B3");
	  strcat (buf, ",n %2,%r1,.+12\n\tb %0");
	}
      /* Handle short backwards branch with an unfilled delay slot.
	 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
	 taken and untaken branches.  */
      else if (dbr_sequence_length () == 0
	       && ! forward_branch_p (insn)
	       && INSN_ADDRESSES_SET_P ()
	       && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	{
	  strcpy (buf, "{com%I2b,|cmp%I2b,}");
	  if (GET_MODE (operands[1]) == DImode)
	    strcat (buf, "*");
	  if (negated)
	    strcat (buf, "%B3 %2,%r1,%0%#");
	  else
	    strcat (buf, "%S3 %2,%r1,%0%#");
	}
      else
	{
	  strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
	  if (GET_MODE (operands[1]) == DImode)
	    strcat (buf, "*");
	  if (negated)
	    strcat (buf, "%S3");
	  else
	    strcat (buf, "%B3");
	  if (nullify)
	    strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
	  else
	    strcat (buf, " %2,%r1,%%r0\n\tb %0");
	}
      break;

    default:
      /* The reversed conditional branch must branch over one additional
	 instruction if the delay slot is filled and needs to be extracted
	 by pa_output_lbranch.  If the delay slot is empty or this is a
	 nullified forward branch, the instruction after the reversed
	 condition branch must be nullified.  */
      if (dbr_sequence_length () == 0
	  || (nullify && forward_branch_p (insn)))
	{
	  nullify = 1;
	  xdelay = 0;
	  operands[4] = GEN_INT (length);
	}
      else
	{
	  xdelay = 1;
	  operands[4] = GEN_INT (length + 4);
	}

      /* Create a reversed conditional branch which branches around
	 the following insns.  */
      if (GET_MODE (operands[1]) != DImode)
	{
	  if (nullify)
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
	    }
	  else
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
	    }
	}
      else
	{
	  if (nullify)
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
	    }
	  else
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
	    }
	}

      output_asm_insn (buf, operands);
      return pa_output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}
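/* Illustrative note: the {old|new} braces in the templates above select
   between the PA 1.x (com*) and PA 2.0 (cmp*) mnemonics according to
   the assembler dialect in use; a length 4 forward branch might come
   out as, e.g.,

       cmpb,= %r4,%r3,L$0012
*/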
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */

const char *
pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
{
  rtx xoperands[2];

  xoperands[0] = dest;

  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (! JUMP_P (NEXT_INSN (insn)));

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
		       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }

  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the caller's frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; goto
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
	/* Use the slot at -40 in the frame marker since HP builtin
	   alloca doesn't copy it.  */
	output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
	/* Use the "Clean Up" slot in the frame marker.  In GCC,
	   the only other use of this location is for copying a
	   floating point double argument from a floating-point
	   register to two general registers.  The copy is done
	   as an "atomic" operation when outputting a call, so it
	   won't interfere with our using the location here.  */
	output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }

  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
      if (TARGET_SOM || !TARGET_GAS)
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (xoperands[1]));
	  output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
	}
      else
	{
	  output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	}
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);

  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldd -16(%%r30),%%r1";
      else
	return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldw -20(%%r30),%%r1";
      else
	return "ldw -12(%%r30),%%r1";
    }
}
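/* Illustrative example: in the 32-bit non-PIC case with a frame, the
   16-byte sequence produced here is

       stw %r1,-12(%r30)
       ldil L'target,%r1
       be R'target(%sr4,%r1)
       ldw -12(%r30),%r1

   where the final load is the string returned above and ends up in
   the delay slot of the be.  */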
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
    /* All short conditional branches except backwards with an unfilled
       delay slot.  */
    case 4:
      if (useskip)
	strcpy (buf, "{extrs,|extrw,s,}");
      else
	strcpy (buf, "bb,");
      if (useskip && GET_MODE (operands[0]) == DImode)
	strcpy (buf, "extrd,s,*");
      else if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      if ((which == 0 && negated)
	  || (which == 1 && ! negated))
	strcat (buf, ">=");
      else
	strcat (buf, "<");
      if (useskip)
	strcat (buf, " %0,%1,1,%%r0");
      else if (nullify && negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, ",n %0,%1,%3%#");
	  else
	    strcat (buf, ",n %0,%1,%3");
	}
      else if (nullify && ! negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, ",n %0,%1,%2%#");
	  else
	    strcat (buf, ",n %0,%1,%2");
	}
      else if (! nullify && negated)
	strcat (buf, " %0,%1,%3");
      else if (! nullify && ! negated)
	strcat (buf, " %0,%1,%2");
      break;

    /* All long conditionals.  Note a short backward branch with an
       unfilled delay slot is treated just like a long backward branch
       with an unfilled delay slot.  */
    case 8:
      /* Handle weird backwards branch with a filled delay slot
	 which is nullified.  */
      if (dbr_sequence_length () != 0
	  && ! forward_branch_p (insn)
	  && nullify)
	{
	  strcpy (buf, "bb,");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (negated)
	    strcat (buf, ",n %0,%1,.+12\n\tb %3");
	  else
	    strcat (buf, ",n %0,%1,.+12\n\tb %2");
	}
      /* Handle short backwards branch with an unfilled delay slot.
	 Using a bb;nop rather than extrs;bl saves 1 cycle for both
	 taken and untaken branches.  */
      else if (dbr_sequence_length () == 0
	       && ! forward_branch_p (insn)
	       && INSN_ADDRESSES_SET_P ()
	       && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	{
	  strcpy (buf, "bb,");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, ">=");
	  else
	    strcat (buf, "<");
	  if (negated)
	    strcat (buf, " %0,%1,%3%#");
	  else
	    strcat (buf, " %0,%1,%2%#");
	}
      else
	{
	  if (GET_MODE (operands[0]) == DImode)
	    strcpy (buf, "extrd,s,*");
	  else
	    strcpy (buf, "{extrs,|extrw,s,}");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (nullify && negated)
	    strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
	  else if (nullify && ! negated)
	    strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
	  else if (negated)
	    strcat (buf, " %0,%1,1,%%r0\n\tb %3");
	  else
	    strcat (buf, " %0,%1,1,%%r0\n\tb %2");
	}
      break;

    default:
      /* The reversed conditional branch must branch over one additional
	 instruction if the delay slot is filled and needs to be extracted
	 by pa_output_lbranch.  If the delay slot is empty or this is a
	 nullified forward branch, the instruction after the reversed
	 condition branch must be nullified.  */
      if (dbr_sequence_length () == 0
	  || (nullify && forward_branch_p (insn)))
	{
	  nullify = 1;
	  xdelay = 0;
	  operands[4] = GEN_INT (length);
	}
      else
	{
	  xdelay = 1;
	  operands[4] = GEN_INT (length + 4);
	}

      if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      else
	strcpy (buf, "bb,");
      if ((which == 0 && negated)
	  || (which == 1 && !negated))
	strcat (buf, "<");
      else
	strcat (buf, ">=");
      if (nullify)
	strcat (buf, ",n %0,%1,.+%4");
      else
	strcat (buf, " %0,%1,.+%4");
      output_asm_insn (buf, operands);
      return pa_output_lbranch (negated ? operands[3] : operands[2],
				insn, xdelay);
    }
  return buf;
}
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
	       int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
    /* All short conditional branches except backwards with an unfilled
       delay slot.  */
    case 4:
      if (useskip)
	strcpy (buf, "{vextrs,|extrw,s,}");
      else
	strcpy (buf, "{bvb,|bb,}");
      if (useskip && GET_MODE (operands[0]) == DImode)
	strcpy (buf, "extrd,s,*");
      else if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      if ((which == 0 && negated)
	  || (which == 1 && ! negated))
	strcat (buf, ">=");
      else
	strcat (buf, "<");
      if (useskip)
	strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
      else if (nullify && negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
	  else
	    strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
	}
      else if (nullify && ! negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
	  else
	    strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
	}
      else if (! nullify && negated)
	strcat (buf, "{ %0,%3| %0,%%sar,%3}");
      else if (! nullify && ! negated)
	strcat (buf, "{ %0,%2| %0,%%sar,%2}");
      break;

    /* All long conditionals.  Note a short backward branch with an
       unfilled delay slot is treated just like a long backward branch
       with an unfilled delay slot.  */
    case 8:
      /* Handle weird backwards branch with a filled delay slot
	 which is nullified.  */
      if (dbr_sequence_length () != 0
	  && ! forward_branch_p (insn)
	  && nullify)
	{
	  strcpy (buf, "{bvb,|bb,}");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (negated)
	    strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
	  else
	    strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
	}
      /* Handle short backwards branch with an unfilled delay slot.
	 Using a bb;nop rather than extrs;bl saves 1 cycle for both
	 taken and untaken branches.  */
      else if (dbr_sequence_length () == 0
	       && ! forward_branch_p (insn)
	       && INSN_ADDRESSES_SET_P ()
	       && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	{
	  strcpy (buf, "{bvb,|bb,}");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, ">=");
	  else
	    strcat (buf, "<");
	  if (negated)
	    strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
	  else
	    strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
	}
      else
	{
	  strcpy (buf, "{vextrs,|extrw,s,}");
	  if (GET_MODE (operands[0]) == DImode)
	    strcpy (buf, "extrd,s,*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (nullify && negated)
	    strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
	  else if (nullify && ! negated)
	    strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
	  else if (negated)
	    strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
	  else
	    strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
	}
      break;

    default:
      /* The reversed conditional branch must branch over one additional
	 instruction if the delay slot is filled and needs to be extracted
	 by pa_output_lbranch.  If the delay slot is empty or this is a
	 nullified forward branch, the instruction after the reversed
	 condition branch must be nullified.  */
      if (dbr_sequence_length () == 0
	  || (nullify && forward_branch_p (insn)))
	{
	  nullify = 1;
	  xdelay = 0;
	  operands[4] = GEN_INT (length);
	}
      else
	{
	  xdelay = 1;
	  operands[4] = GEN_INT (length + 4);
	}

      if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      else
	strcpy (buf, "{bvb,|bb,}");
      if ((which == 0 && negated)
	  || (which == 1 && !negated))
	strcat (buf, "<");
      else
	strcat (buf, ">=");
      if (nullify)
	strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
      else
	strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
      output_asm_insn (buf, operands);
      return pa_output_lbranch (negated ? operands[3] : operands[2],
				insn, xdelay);
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "ldo %1(%0),%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
	  output_asm_insn ("ldw -16(%%r30),%4", operands);
	  output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else
	{
	  output_asm_insn ("ldw %0,%4", operands);
	  return "ldo %1(%4),%4\n\tstw %4,%0";
	}
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "addib,%C2,n %1,%0,%3%#";
	      else
		return "addib,%C2,n %1,%0,%3";
	    }
	  else
	    return "addib,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "addib,%N2,n %1,%0,.+12\n\tb %3";
	  /* Handle short backwards branch with an unfilled delay slot.
	     Using an addb;nop rather than addi;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "addib,%C2 %1,%0,%3%#";

	  /* Handle normal cases.  */
	  if (nullify)
	    return "addi,%N2 %1,%0,%0\n\tb,n %3";
	  else
	    return "addi,%N2 %1,%0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by pa_output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

	  return pa_output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
	 increment the GR, store the GR into MEM, and finally reload
	 the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
		       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
	return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
	return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[5] = GEN_INT (length - 16);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
	return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
	return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
	{
	  operands[5] = GEN_INT (length - 4);
	  output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
}
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
		int reverse_comparison)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "copy %1,%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("stw %1,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else if (which_alternative == 2)
	return "stw %1,%0";
      else
	return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "movb,%C2,n %1,%0,%3%#";
	      else
		return "movb,%C2,n %1,%0,%3";
	    }
	  else
	    return "movb,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "movb,%N2,n %1,%0,.+12\n\tb %3";

	  /* Handle short backwards branch with an unfilled delay slot.
	     Using a movb;nop rather than or;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "movb,%C2 %1,%0,%3%#";
	  /* Handle normal cases.  */
	  if (nullify)
	    return "or,%N2 %1,%%r0,%0\n\tb,n %3";
	  else
	    return "or,%N2 %1,%%r0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by pa_output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

	  return pa_output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
	 finally load the FP register from MEM from within the branch's
	 delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[4] = GEN_INT (length - 4);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
			   operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
			   operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
}
/* Copy any FP arguments in INSN into integer registers.  */
static void
copy_fp_args (rtx insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  /* Copy the FP register into an integer register via memory.  */
	  if (arg_mode == SFmode)
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
	      output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	  else
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
	      output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	}
    }
}
/* Compute length of the FP argument copy sequence for INSN.  */
static int
length_fp_args (rtx insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    length += 8;
	  else
	    length += 12;
	}
    }

  return length;
}
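/* Illustrative check: each SFmode argument accounts for the two insns
   (fstw + ldw, 8 bytes) emitted by copy_fp_args, and each DFmode
   argument for three insns (fstd + two ldw, 12 bytes).  */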
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by pa_output_millicode_call.
   We include the delay slot in the returned length as it is better to
   overestimate the length than to underestimate it.  */

int
pa_attr_length_millicode_call (rtx_insn *insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
	return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
	return 8;

      if (!flag_pic)
	return 12;

      return 24;
    }
}
/* INSN is a function call.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because it
     has an attribute type of sh_func_adrs, it never has a nonzero
     sequence length (i.e., the delay slot is never filled).  */
  if (!TARGET_LONG_CALLS
      && (attr_length == 8
	  || (attr_length == 28
	      && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
	{
	  /* It might seem that one insn could be saved by accessing
	     the millicode function using the linkage table.  However,
	     this doesn't work in shared libraries and other dynamically
	     loaded objects.  Using a pc-relative sequence also avoids
	     problems related to the implicit use of the gp register.  */
	  output_asm_insn ("b,l .+8,%%r1", xoperands);

	  if (TARGET_GAS)
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      xoperands[1] = gen_label_rtx ();
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      targetm.asm_out.internal_label (asm_out_file, "L",
					      CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }

	  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
	}
      else if (TARGET_PORTABLE_RUNTIME)
	{
	  /* Pure portable runtime doesn't allow be/ble; we also don't
	     have PIC support in the assembler/linker, so this sequence
	     is needed.  */

	  /* Get the address of our target into %r1.  */
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

	  /* Get our return address into %r31.  */
	  output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
	  output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
      else if (!flag_pic)
	{
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  if (TARGET_PA_20)
	    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
	  else
	    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
	}
      else
	{
	  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
	  output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

	  if (TARGET_SOM || !TARGET_GAS)
	    {
	      /* The HP assembler can generate relocations for the
		 difference of two symbols.  GAS can do this for a
		 millicode symbol but not an arbitrary external
		 symbol when generating SOM output.  */
	      xoperands[1] = gen_label_rtx ();
	      targetm.asm_out.internal_label (asm_out_file, "L",
					      CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
			       xoperands);
	    }

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  return "";
}
7657 /* Return the attribute length of the call instruction INSN. The SIBCALL
7658 flag indicates whether INSN is a regular call or a sibling call. The
7659 length returned must be longer than the code actually generated by
7660 pa_output_call. Since branch shortening is done before delay branch
7661 sequencing, there is no way to determine whether or not the delay
7662 slot will be filled during branch shortening. Even when the delay
7663 slot is filled, we may have to add a nop if the delay slot contains
7664 a branch that can't reach its target. Thus, we always have to include
7665 the delay slot in the length estimate. This used to be done in
7666 pa_adjust_insn_length but we do it here now as some sequences always
7667 fill the delay slot and we can save four bytes in the estimate for
7668 these sequences. */
7670 int
7671 pa_attr_length_call (rtx_insn *insn, int sibcall)
7673 int local_call;
7674 rtx call, call_dest;
7675 tree call_decl;
7676 int length = 0;
7677 rtx pat = PATTERN (insn);
7678 unsigned long distance = -1;
7680 gcc_assert (CALL_P (insn));
7682 if (INSN_ADDRESSES_SET_P ())
7684 unsigned long total;
7686 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7687 distance = (total + insn_current_reference_address (insn));
7688 if (distance < total)
7689 distance = -1;
7692 gcc_assert (GET_CODE (pat) == PARALLEL);
7694 /* Get the call rtx. */
7695 call = XVECEXP (pat, 0, 0);
7696 if (GET_CODE (call) == SET)
7697 call = SET_SRC (call);
7699 gcc_assert (GET_CODE (call) == CALL);
7701 /* Determine if this is a local call. */
7702 call_dest = XEXP (XEXP (call, 0), 0);
7703 call_decl = SYMBOL_REF_DECL (call_dest);
7704 local_call = call_decl && targetm.binds_local_p (call_decl);
7706 /* pc-relative branch. */
7707 if (!TARGET_LONG_CALLS
7708 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7709 || distance < MAX_PCREL17F_OFFSET))
7710 length += 8;
7712 /* 64-bit plabel sequence. */
7713 else if (TARGET_64BIT && !local_call)
7714 length += sibcall ? 28 : 24;
7716 /* non-pic long absolute branch sequence. */
7717 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7718 length += 12;
7720 /* long pc-relative branch sequence. */
7721 else if (TARGET_LONG_PIC_SDIFF_CALL
7722 || (TARGET_GAS && !TARGET_SOM
7723 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7725 length += 20;
7727 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7728 length += 8;
7731 /* 32-bit plabel sequence. */
7732 else
7734 length += 32;
7736 if (TARGET_SOM)
7737 length += length_fp_args (insn);
7739 if (flag_pic)
7740 length += 4;
7742 if (!TARGET_PA_20)
7744 if (!sibcall)
7745 length += 8;
7747 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7748 length += 8;
7752 return length;
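/* Illustrative sketch (editorial addition, not part of the original
   sources): the distance estimate used above and in the other
   pa_attr_length_* routines folds two cases into one unsigned test.
   TOTAL is zero when the function lives in a named section, so only
   intra-function distances matter; otherwise it is the running count
   of code bytes emitted so far.  A stand-alone rendition:  */

static unsigned long
example_call_distance (unsigned long total, unsigned long ref_addr)
{
  unsigned long distance = total + ref_addr;

  /* If the unsigned addition wrapped, the sum compares below TOTAL;
     force the distance to all ones, i.e. "assume out of reach".  */
  if (distance < total)
    distance = -1;

  return distance;
}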
7755 /* INSN is a function call.
7757 CALL_DEST is the routine we are calling. */
7759 const char *
7760 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7762 int seq_length = dbr_sequence_length ();
7763 tree call_decl = SYMBOL_REF_DECL (call_dest);
7764 int local_call = call_decl && targetm.binds_local_p (call_decl);
7765 rtx xoperands[2];
7767 xoperands[0] = call_dest;
7769 /* Handle the common case where we're sure that the branch will reach
7770 the beginning of the "$CODE$" subspace. This is the beginning of
7771 the current function if we are in a named section. */
7772 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7774 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7775 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7777 else
7779 if (TARGET_64BIT && !local_call)
7781 /* ??? As far as I can tell, the HP linker doesn't support the
7782 long pc-relative sequence described in the 64-bit runtime
7783 architecture. So, we use a slightly longer indirect call. */
7784 xoperands[0] = pa_get_deferred_plabel (call_dest);
7785 xoperands[1] = gen_label_rtx ();
7787 /* If this isn't a sibcall, we put the load of %r27 into the
7788 delay slot. We can't do this in a sibcall as we don't
7789 have a second call-clobbered scratch register available.
7790 We don't need to do anything when generating fast indirect
7791 calls. */
7792 if (seq_length != 0 && !sibcall)
7794 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7795 optimize, 0, NULL);
7797 /* Now delete the delay insn. */
7798 SET_INSN_DELETED (NEXT_INSN (insn));
7799 seq_length = 0;
7802 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7803 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7804 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7806 if (sibcall)
7808 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7809 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7810 output_asm_insn ("bve (%%r1)", xoperands);
7812 else
7814 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7815 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7816 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7817 seq_length = 1;
7820 else
7822 int indirect_call = 0;
7824 /* Emit a long call. There are several different sequences
7825 of increasing length and complexity. In most cases,
7826 they don't allow an instruction in the delay slot. */
7827 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7828 && !TARGET_LONG_PIC_SDIFF_CALL
7829 && !(TARGET_GAS && !TARGET_SOM
7830 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7831 && !TARGET_64BIT)
7832 indirect_call = 1;
7834 if (seq_length != 0
7835 && !sibcall
7836 && (!TARGET_PA_20
7837 || indirect_call
7838 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7840 /* A non-jump insn in the delay slot. By definition we can
7841 emit this insn before the call (and in fact before argument
7842 relocating). */
7843 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7844 NULL);
7846 /* Now delete the delay insn. */
7847 SET_INSN_DELETED (NEXT_INSN (insn));
7848 seq_length = 0;
7851 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7853 /* This is the best sequence for making long calls in
7854 non-pic code. Unfortunately, GNU ld doesn't provide
7855 the stub needed for external calls, and GAS's support
7856 for this with the SOM linker is buggy. It is safe
7857 to use this for local calls. */
7858 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7859 if (sibcall)
7860 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7861 else
7863 if (TARGET_PA_20)
7864 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7865 xoperands);
7866 else
7867 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7869 output_asm_insn ("copy %%r31,%%r2", xoperands);
7870 seq_length = 1;
7873 else
7875 if (TARGET_LONG_PIC_SDIFF_CALL)
7877 /* The HP assembler and linker can handle relocations
7878 for the difference of two symbols. The HP assembler
7879 recognizes the sequence as a pc-relative call and
7880 the linker provides stubs when needed. */
7881 xoperands[1] = gen_label_rtx ();
7882 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7883 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7884 targetm.asm_out.internal_label (asm_out_file, "L",
7885 CODE_LABEL_NUMBER (xoperands[1]));
7886 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7888 else if (TARGET_GAS && !TARGET_SOM
7889 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7891 /* GAS currently can't generate the relocations that
7892 are needed for the SOM linker under HP-UX using this
7893 sequence. The GNU linker doesn't generate the stubs
7894 that are needed for external calls on TARGET_ELF32
7895 with this sequence. For now, we have to use a
7896 longer plabel sequence when using GAS. */
7897 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7898 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7899 xoperands);
7900 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7901 xoperands);
7903 else
7905 /* Emit a long plabel-based call sequence. This is
7906 essentially an inline implementation of $$dyncall.
7907 We don't actually try to call $$dyncall as this is
7908 as difficult as calling the function itself. */
7909 xoperands[0] = pa_get_deferred_plabel (call_dest);
7910 xoperands[1] = gen_label_rtx ();
7912 /* Since the call is indirect, FP arguments in registers
7913 need to be copied to the general registers. Then, the
7914 argument relocation stub will copy them back. */
7915 if (TARGET_SOM)
7916 copy_fp_args (insn);
7918 if (flag_pic)
7920 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7921 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7922 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7924 else
7926 output_asm_insn ("addil LR'%0-$global$,%%r27",
7927 xoperands);
7928 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7929 xoperands);
7932 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7933 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7934 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7935 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7937 if (!sibcall && !TARGET_PA_20)
7939 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7940 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7941 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7942 else
7943 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7947 if (TARGET_PA_20)
7949 if (sibcall)
7950 output_asm_insn ("bve (%%r1)", xoperands);
7951 else
7953 if (indirect_call)
7955 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7956 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7957 seq_length = 1;
7959 else
7960 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7963 else
7965 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7966 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7967 xoperands);
7969 if (sibcall)
7971 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7972 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7973 else
7974 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7976 else
7978 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7979 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7980 else
7981 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7983 if (indirect_call)
7984 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7985 else
7986 output_asm_insn ("copy %%r31,%%r2", xoperands);
7987 seq_length = 1;
7994 if (seq_length == 0)
7995 output_asm_insn ("nop", xoperands);
7997 return "";
8000 /* Return the attribute length of the indirect call instruction INSN.
8001 The length must match the code generated by pa_output_indirect_call.
8002 The returned length includes the delay slot. Currently, the delay
8003 slot of an indirect call sequence is not exposed and it is used by
8004 the sequence itself. */
8006 int
8007 pa_attr_length_indirect_call (rtx_insn *insn)
8009 unsigned long distance = -1;
8010 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8012 if (INSN_ADDRESSES_SET_P ())
8014 distance = (total + insn_current_reference_address (insn));
8015 if (distance < total)
8016 distance = -1;
8019 if (TARGET_64BIT)
8020 return 12;
8022 if (TARGET_FAST_INDIRECT_CALLS
8023 || (!TARGET_LONG_CALLS
8024 && !TARGET_PORTABLE_RUNTIME
8025 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8026 || distance < MAX_PCREL17F_OFFSET)))
8027 return 8;
8029 if (flag_pic)
8030 return 20;
8032 if (TARGET_PORTABLE_RUNTIME)
8033 return 16;
8035 /* Out of reach, can use ble. */
8036 return 12;
8039 const char *
8040 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8042 rtx xoperands[1];
8044 if (TARGET_64BIT)
8046 xoperands[0] = call_dest;
8047 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8048 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8049 return "";
8052 /* First the special case for kernels, level 0 systems, etc. */
8053 if (TARGET_FAST_INDIRECT_CALLS)
8054 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8056 /* Now the normal case -- we can reach $$dyncall directly or
8057 we're sure that we can get there via a long-branch stub.
8059 No need to check target flags as the length uniquely identifies
8060 the remaining cases. */
8061 if (pa_attr_length_indirect_call (insn) == 8)
8063 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8064 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8065 variant of the B,L instruction can't be used on the SOM target. */
8066 if (TARGET_PA_20 && !TARGET_SOM)
8067 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8068 else
8069 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8072 /* Long millicode call, but we are not generating PIC or portable runtime
8073 code. */
8074 if (pa_attr_length_indirect_call (insn) == 12)
8075 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8077 /* Long millicode call for portable runtime. */
8078 if (pa_attr_length_indirect_call (insn) == 16)
8079 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8081 /* We need a long PIC call to $$dyncall. */
8082 xoperands[0] = NULL_RTX;
8083 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8084 if (TARGET_SOM || !TARGET_GAS)
8086 xoperands[0] = gen_label_rtx ();
8087 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8088 targetm.asm_out.internal_label (asm_out_file, "L",
8089 CODE_LABEL_NUMBER (xoperands[0]));
8090 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8092 else
8094 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8095 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8096 xoperands);
8098 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8099 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8100 return "";
8103 /* In HPUX 8.0's shared library scheme, special relocations are needed
8104 for function labels if they might be passed to a function
8105 in a shared library (because shared libraries don't live in code
8106 space), and special magic is needed to construct their address. */
8108 void
8109 pa_encode_label (rtx sym)
8111 const char *str = XSTR (sym, 0);
8112 int len = strlen (str) + 1;
8113 char *newstr, *p;
8115 p = newstr = XALLOCAVEC (char, len + 1);
8116 *p++ = '@';
8117 strcpy (p, str);
8119 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8122 static void
8123 pa_encode_section_info (tree decl, rtx rtl, int first)
8125 int old_referenced = 0;
8127 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8128 old_referenced
8129 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8131 default_encode_section_info (decl, rtl, first);
8133 if (first && TEXT_SPACE_P (decl))
8135 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8136 if (TREE_CODE (decl) == FUNCTION_DECL)
8137 pa_encode_label (XEXP (rtl, 0));
8139 else if (old_referenced)
8140 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8143 /* This is roughly the inverse of pa_encode_section_info. */
8145 static const char *
8146 pa_strip_name_encoding (const char *str)
8148 str += (*str == '@');
8149 str += (*str == '*');
8150 return str;
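/* Illustrative sketch (editorial addition): pa_encode_label above
   prefixes a function label with '@', and pa_strip_name_encoding
   undoes that, also skipping the generic '*' user-label prefix.  So
   encoding turns "foo" into "@foo", and stripping "@foo" or "*foo"
   yields "foo" again.  A self-contained rendition of the strip step:  */

static const char *
example_strip_encoding (const char *str)
{
  str += (*str == '@');	/* Skip the PA function-label marker.  */
  str += (*str == '*');	/* Skip the user-label prefix, if present.  */
  return str;
}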
8153 /* Returns 1 if OP is a function label involved in a simple addition
8154 with a constant. Used to keep certain patterns from matching
8155 during instruction combination. */
8156 int
8157 pa_is_function_label_plus_const (rtx op)
8159 /* Strip off any CONST. */
8160 if (GET_CODE (op) == CONST)
8161 op = XEXP (op, 0);
8163 return (GET_CODE (op) == PLUS
8164 && function_label_operand (XEXP (op, 0), VOIDmode)
8165 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8168 /* Output assembly code for a thunk to FUNCTION. */
8170 static void
8171 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8172 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8173 tree function)
8175 static unsigned int current_thunk_number;
8176 int val_14 = VAL_14_BITS_P (delta);
8177 unsigned int old_last_address = last_address, nbytes = 0;
8178 char label[16];
8179 rtx xoperands[4];
8181 xoperands[0] = XEXP (DECL_RTL (function), 0);
8182 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8183 xoperands[2] = GEN_INT (delta);
8185 final_start_function (emit_barrier (), file, 1);
8187 /* Output the thunk. We know that the function is in the same
8188 translation unit (i.e., the same space) as the thunk, and that
8189 thunks are output after their method. Thus, we don't need an
8190 external branch to reach the function. With SOM and GAS,
8191 functions and thunks are effectively in different sections.
8192 Thus, we can always use an IA-relative branch and the linker
8193 will add a long branch stub if necessary.
8195 However, we have to be careful when generating PIC code on the
8196 SOM port to ensure that the sequence does not transfer to an
8197 import stub for the target function as this could clobber the
8198 return value saved at SP-24. This would also apply to the
8199 32-bit linux port if the multi-space model is implemented. */
8200 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8201 && !(flag_pic && TREE_PUBLIC (function))
8202 && (TARGET_GAS || last_address < 262132))
8203 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8204 && ((targetm_common.have_named_sections
8205 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8206 /* The GNU 64-bit linker has rather poor stub management.
8207 So, we use a long branch from thunks that aren't in
8208 the same section as the target function. */
8209 && ((!TARGET_64BIT
8210 && (DECL_SECTION_NAME (thunk_fndecl)
8211 != DECL_SECTION_NAME (function)))
8212 || ((DECL_SECTION_NAME (thunk_fndecl)
8213 == DECL_SECTION_NAME (function))
8214 && last_address < 262132)))
8215 /* In this case, we need to be able to reach the start of
8216 the stub table even though the function is likely closer
8217 and can be jumped to directly. */
8218 || (targetm_common.have_named_sections
8219 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8220 && DECL_SECTION_NAME (function) == NULL
8221 && total_code_bytes < MAX_PCREL17F_OFFSET)
8222 /* Likewise. */
8223 || (!targetm_common.have_named_sections
8224 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8226 if (!val_14)
8227 output_asm_insn ("addil L'%2,%%r26", xoperands);
8229 output_asm_insn ("b %0", xoperands);
8231 if (val_14)
8233 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8234 nbytes += 8;
8236 else
8238 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8239 nbytes += 12;
8242 else if (TARGET_64BIT)
8244 /* We only have one call-clobbered scratch register, so we can't
8245 make use of the delay slot if delta doesn't fit in 14 bits. */
8246 if (!val_14)
8248 output_asm_insn ("addil L'%2,%%r26", xoperands);
8249 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8252 output_asm_insn ("b,l .+8,%%r1", xoperands);
8254 if (TARGET_GAS)
8256 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8257 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8259 else
8261 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8262 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8265 if (val_14)
8267 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8268 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8269 nbytes += 20;
8271 else
8273 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8274 nbytes += 24;
8277 else if (TARGET_PORTABLE_RUNTIME)
8279 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8280 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8282 if (!val_14)
8283 output_asm_insn ("addil L'%2,%%r26", xoperands);
8285 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8287 if (val_14)
8289 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8290 nbytes += 16;
8292 else
8294 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8295 nbytes += 20;
8298 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8300 /* The function is accessible from outside this module. The only
8301 way to avoid an import stub between the thunk and function is to
8302 call the function directly with an indirect sequence similar to
8303 that used by $$dyncall. This is possible because $$dyncall acts
8304 as the import stub in an indirect call. */
8305 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8306 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8307 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8308 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8309 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8310 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8311 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8312 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8313 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8315 if (!val_14)
8317 output_asm_insn ("addil L'%2,%%r26", xoperands);
8318 nbytes += 4;
8321 if (TARGET_PA_20)
8323 output_asm_insn ("bve (%%r22)", xoperands);
8324 nbytes += 36;
8326 else if (TARGET_NO_SPACE_REGS)
8328 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8329 nbytes += 36;
8331 else
8333 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8334 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8335 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8336 nbytes += 44;
8339 if (val_14)
8340 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8341 else
8342 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8344 else if (flag_pic)
8346 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8348 if (TARGET_SOM || !TARGET_GAS)
8350 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8351 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8353 else
8355 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8356 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8359 if (!val_14)
8360 output_asm_insn ("addil L'%2,%%r26", xoperands);
8362 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8364 if (val_14)
8366 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8367 nbytes += 20;
8369 else
8371 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8372 nbytes += 24;
8375 else
8377 if (!val_14)
8378 output_asm_insn ("addil L'%2,%%r26", xoperands);
8380 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8381 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8383 if (val_14)
8385 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8386 nbytes += 12;
8388 else
8390 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8391 nbytes += 16;
8395 final_end_function ();
8397 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8399 switch_to_section (data_section);
8400 output_asm_insn (".align 4", xoperands);
8401 ASM_OUTPUT_LABEL (file, label);
8402 output_asm_insn (".word P'%0", xoperands);
8405 current_thunk_number++;
8406 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8407 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8408 last_address += nbytes;
8409 if (old_last_address > last_address)
8410 last_address = UINT_MAX;
8411 update_total_code_bytes (nbytes);
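/* Illustrative sketch (editorial addition): the bookkeeping above
   rounds NBYTES up to the next multiple of the function alignment,
   FUNCTION_BOUNDARY / BITS_PER_UNIT, which is a power of two.  The
   same round-up idiom in isolation, with the alignment as a
   parameter:  */

static unsigned int
example_round_up (unsigned int nbytes, unsigned int align)
{
  /* ALIGN must be a power of two; adding ALIGN - 1 and masking off
     the low bits rounds up without a division.  For instance,
     example_round_up (13, 8) == 16.  */
  return (nbytes + align - 1) & ~(align - 1);
}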
8414 /* Only direct calls to static functions are allowed to be sibling (tail)
8415 call optimized.
8417 This restriction is necessary because some linker generated stubs will
8418 store return pointers into rp' in some cases which might clobber a
8419 live value already in rp'.
8421 In a sibcall the current function and the target function share stack
8422 space. Thus if the path to the current function and the path to the
8423 target function save a value in rp', they save the value into the
8424 same stack slot, which has undesirable consequences.
8426 Because of the deferred binding nature of shared libraries any function
8427 with external scope could be in a different load module and thus require
8428 rp' to be saved when calling that function. So sibcall optimizations
8429 can only be safe for static functions.
8431 Note that GCC never needs return value relocations, so we don't have to
8432 worry about static calls with return value relocations (which require
8433 saving rp').
8435 It is safe to perform a sibcall optimization when the target function
8436 will never return. */
8437 static bool
8438 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8440 if (TARGET_PORTABLE_RUNTIME)
8441 return false;
8443 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8444 single subspace mode and the call is not indirect. As far as I know,
8445 there is no operating system support for the multiple subspace mode.
8446 It might be possible to support indirect calls if we didn't use
8447 $$dyncall (see the indirect sequence generated in pa_output_call). */
8448 if (TARGET_ELF32)
8449 return (decl != NULL_TREE);
8451 /* Sibcalls are not ok because the arg pointer register is not a fixed
8452 register. This prevents the sibcall optimization from occurring. In
8453 addition, there are problems with stub placement using GNU ld. This
8454 is because a normal sibcall branch uses a 17-bit relocation while
8455 a regular call branch uses a 22-bit relocation. As a result, more
8456 care needs to be taken in the placement of long-branch stubs. */
8457 if (TARGET_64BIT)
8458 return false;
8460 /* Sibcalls are only ok within a translation unit. */
8461 return (decl && !TREE_PUBLIC (decl));
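/* Illustrative sketch (editorial addition; the functions below are
   hypothetical): on the 32-bit non-ELF32 ports the test above accepts
   only direct calls to functions that are not TREE_PUBLIC, since an
   external target could bind to another load module whose call stub
   clobbers the rp' save slot shared by caller and callee.  */

static int example_local (int x) { return x + 1; }
extern int example_extern (int x);

int
example_tail_local (int x)
{
  return example_local (x);	/* decl && !TREE_PUBLIC: sibcall OK.  */
}

int
example_tail_extern (int x)
{
  return example_extern (x);	/* TREE_PUBLIC: sibcall rejected.  */
}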
8464 /* ??? Addition is not commutative on the PA due to the weird implicit
8465 space register selection rules for memory addresses. Therefore, we
8466 don't consider a + b == b + a, as this might be inside a MEM. */
8467 static bool
8468 pa_commutative_p (const_rtx x, int outer_code)
8470 return (COMMUTATIVE_P (x)
8471 && (TARGET_NO_SPACE_REGS
8472 || (outer_code != UNKNOWN && outer_code != MEM)
8473 || GET_CODE (x) != PLUS));
8476 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8477 use in fmpyadd instructions. */
8478 int
8479 pa_fmpyaddoperands (rtx *operands)
8481 machine_mode mode = GET_MODE (operands[0]);
8483 /* Must be a floating point mode. */
8484 if (mode != SFmode && mode != DFmode)
8485 return 0;
8487 /* All modes must be the same. */
8488 if (! (mode == GET_MODE (operands[1])
8489 && mode == GET_MODE (operands[2])
8490 && mode == GET_MODE (operands[3])
8491 && mode == GET_MODE (operands[4])
8492 && mode == GET_MODE (operands[5])))
8493 return 0;
8495 /* All operands must be registers. */
8496 if (! (GET_CODE (operands[1]) == REG
8497 && GET_CODE (operands[2]) == REG
8498 && GET_CODE (operands[3]) == REG
8499 && GET_CODE (operands[4]) == REG
8500 && GET_CODE (operands[5]) == REG))
8501 return 0;
8503 /* Only 2 real operands to the addition. One of the input operands must
8504 be the same as the output operand. */
8505 if (! rtx_equal_p (operands[3], operands[4])
8506 && ! rtx_equal_p (operands[3], operands[5]))
8507 return 0;
8509 /* Inout operand of add cannot conflict with any operands from multiply. */
8510 if (rtx_equal_p (operands[3], operands[0])
8511 || rtx_equal_p (operands[3], operands[1])
8512 || rtx_equal_p (operands[3], operands[2]))
8513 return 0;
8515 /* multiply cannot feed into addition operands. */
8516 if (rtx_equal_p (operands[4], operands[0])
8517 || rtx_equal_p (operands[5], operands[0]))
8518 return 0;
8520 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8521 if (mode == SFmode
8522 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8523 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8524 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8525 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8526 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8527 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8528 return 0;
8530 /* Passed. Operands are suitable for fmpyadd. */
8531 return 1;
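/* Illustrative example (editorial addition; register numbers chosen
   for exposition): for DFmode, a multiply fr6 = fr4 * fr5 (operands
   0, 1, 2) paired with an add fr7 = fr7 + fr8 (operands 3, 4, 5)
   passes every check above: the add reuses its own output, and neither
   fr7 nor fr8 overlaps the multiply's operands.  Making the add read
   fr6 instead would fail the "multiply cannot feed into addition
   operands" test.  */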
8534 #if !defined(USE_COLLECT2)
8535 static void
8536 pa_asm_out_constructor (rtx symbol, int priority)
8538 if (!function_label_operand (symbol, VOIDmode))
8539 pa_encode_label (symbol);
8541 #ifdef CTORS_SECTION_ASM_OP
8542 default_ctor_section_asm_out_constructor (symbol, priority);
8543 #else
8544 # ifdef TARGET_ASM_NAMED_SECTION
8545 default_named_section_asm_out_constructor (symbol, priority);
8546 # else
8547 default_stabs_asm_out_constructor (symbol, priority);
8548 # endif
8549 #endif
8552 static void
8553 pa_asm_out_destructor (rtx symbol, int priority)
8555 if (!function_label_operand (symbol, VOIDmode))
8556 pa_encode_label (symbol);
8558 #ifdef DTORS_SECTION_ASM_OP
8559 default_dtor_section_asm_out_destructor (symbol, priority);
8560 #else
8561 # ifdef TARGET_ASM_NAMED_SECTION
8562 default_named_section_asm_out_destructor (symbol, priority);
8563 # else
8564 default_stabs_asm_out_destructor (symbol, priority);
8565 # endif
8566 #endif
8568 #endif
8570 /* This function places uninitialized global data in the bss section.
8571 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8572 function on the SOM port to prevent uninitialized global data from
8573 being placed in the data section. */
8575 void
8576 pa_asm_output_aligned_bss (FILE *stream,
8577 const char *name,
8578 unsigned HOST_WIDE_INT size,
8579 unsigned int align)
8581 switch_to_section (bss_section);
8582 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8584 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8585 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8586 #endif
8588 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8589 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8590 #endif
8592 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8593 ASM_OUTPUT_LABEL (stream, name);
8594 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8597 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8598 that doesn't allow the alignment of global common storage to be directly
8599 specified. The SOM linker aligns common storage based on the rounded
8600 value of the NUM_BYTES parameter in the .comm directive. It's not
8601 possible to use the .align directive as it doesn't affect the alignment
8602 of the label associated with a .comm directive. */
8604 void
8605 pa_asm_output_aligned_common (FILE *stream,
8606 const char *name,
8607 unsigned HOST_WIDE_INT size,
8608 unsigned int align)
8610 unsigned int max_common_align;
8612 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8613 if (align > max_common_align)
8615 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8616 "for global common data. Using %u",
8617 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8618 align = max_common_align;
8621 switch_to_section (bss_section);
8623 assemble_name (stream, name);
8624 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8625 MAX (size, align / BITS_PER_UNIT));
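/* Illustrative example (editorial addition): since the SOM linker
   derives the alignment of common storage from the rounded .comm
   size, the routine above inflates the request to the larger of the
   object size and the alignment in bytes.  A 4-byte object requiring
   16-byte alignment is thus emitted with ".comm 16"; the extra 12
   bytes buy the alignment that a .align directive cannot provide
   here.  */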
8628 /* We can't use .comm for local common storage as the SOM linker effectively
8629 treats the symbol as universal and uses the same storage for local symbols
8630 with the same name in different object files. The .block directive
8631 reserves an uninitialized block of storage. However, it's not common
8632 storage. Fortunately, GCC never requests common storage with the same
8633 name in any given translation unit. */
8635 void
8636 pa_asm_output_aligned_local (FILE *stream,
8637 const char *name,
8638 unsigned HOST_WIDE_INT size,
8639 unsigned int align)
8641 switch_to_section (bss_section);
8642 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8644 #ifdef LOCAL_ASM_OP
8645 fprintf (stream, "%s", LOCAL_ASM_OP);
8646 assemble_name (stream, name);
8647 fprintf (stream, "\n");
8648 #endif
8650 ASM_OUTPUT_LABEL (stream, name);
8651 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8654 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8655 use in fmpysub instructions. */
8656 int
8657 pa_fmpysuboperands (rtx *operands)
8659 machine_mode mode = GET_MODE (operands[0]);
8661 /* Must be a floating point mode. */
8662 if (mode != SFmode && mode != DFmode)
8663 return 0;
8665 /* All modes must be the same. */
8666 if (! (mode == GET_MODE (operands[1])
8667 && mode == GET_MODE (operands[2])
8668 && mode == GET_MODE (operands[3])
8669 && mode == GET_MODE (operands[4])
8670 && mode == GET_MODE (operands[5])))
8671 return 0;
8673 /* All operands must be registers. */
8674 if (! (GET_CODE (operands[1]) == REG
8675 && GET_CODE (operands[2]) == REG
8676 && GET_CODE (operands[3]) == REG
8677 && GET_CODE (operands[4]) == REG
8678 && GET_CODE (operands[5]) == REG))
8679 return 0;
8681 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8682 operation, so operands[4] must be the same as operands[3]. */
8683 if (! rtx_equal_p (operands[3], operands[4]))
8684 return 0;
8686 /* multiply cannot feed into subtraction. */
8687 if (rtx_equal_p (operands[5], operands[0]))
8688 return 0;
8690 /* Inout operand of sub cannot conflict with any operands from multiply. */
8691 if (rtx_equal_p (operands[3], operands[0])
8692 || rtx_equal_p (operands[3], operands[1])
8693 || rtx_equal_p (operands[3], operands[2]))
8694 return 0;
8696 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8697 if (mode == SFmode
8698 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8699 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8700 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8701 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8702 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8703 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8704 return 0;
8706 /* Passed. Operands are suitable for fmpysub. */
8707 return 1;
8710 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8711 constants for shadd instructions. */
8712 int
8713 pa_shadd_constant_p (int val)
8715 if (val == 2 || val == 4 || val == 8)
8716 return 1;
8717 else
8718 return 0;
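/* Illustrative note (editorial addition): 2, 4 and 8 are the scale
   factors of the PA shift-and-add instructions (the sh1add, sh2add
   and sh3add family), which compute t = (x << n) + y for n = 1, 2, 3.
   An address computation such as base + 4 * index therefore maps to a
   single sh2add, which is why patterns test pa_shadd_constant_p on
   the multiplier.  */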
8721 /* Return TRUE if INSN branches forward. */
8723 static bool
8724 forward_branch_p (rtx_insn *insn)
8726 rtx lab = JUMP_LABEL (insn);
8728 /* The INSN must have a jump label. */
8729 gcc_assert (lab != NULL_RTX);
8731 if (INSN_ADDRESSES_SET_P ())
8732 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8734 while (insn)
8736 if (insn == lab)
8737 return true;
8738 else
8739 insn = NEXT_INSN (insn);
8742 return false;
8745 /* Output an unconditional move and branch insn. */
8747 const char *
8748 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8750 int length = get_attr_length (insn);
8752 /* These are the cases in which we win. */
8753 if (length == 4)
8754 return "mov%I1b,tr %1,%0,%2";
8756 /* None of the following cases win, but they don't lose either. */
8757 if (length == 8)
8759 if (dbr_sequence_length () == 0)
8761 /* Nothing in the delay slot, fake it by putting the combined
8762 insn (the copy or add) in the delay slot of a bl. */
8763 if (GET_CODE (operands[1]) == CONST_INT)
8764 return "b %2\n\tldi %1,%0";
8765 else
8766 return "b %2\n\tcopy %1,%0";
8768 else
8770 /* Something in the delay slot, but we've got a long branch. */
8771 if (GET_CODE (operands[1]) == CONST_INT)
8772 return "ldi %1,%0\n\tb %2";
8773 else
8774 return "copy %1,%0\n\tb %2";
8778 if (GET_CODE (operands[1]) == CONST_INT)
8779 output_asm_insn ("ldi %1,%0", operands);
8780 else
8781 output_asm_insn ("copy %1,%0", operands);
8782 return pa_output_lbranch (operands[2], insn, 1);
8785 /* Output an unconditional add and branch insn. */
8787 const char *
8788 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8790 int length = get_attr_length (insn);
8792 /* To make life easy we want operand0 to be the shared input/output
8793 operand and operand1 to be the readonly operand. */
8794 if (operands[0] == operands[1])
8795 operands[1] = operands[2];
8797 /* These are the cases in which we win. */
8798 if (length == 4)
8799 return "add%I1b,tr %1,%0,%3";
8801 /* None of the following cases win, but they don't lose either. */
8802 if (length == 8)
8804 if (dbr_sequence_length () == 0)
8805 /* Nothing in the delay slot, fake it by putting the combined
8806 insn (the copy or add) in the delay slot of a bl. */
8807 return "b %3\n\tadd%I1 %1,%0,%0";
8808 else
8809 /* Something in the delay slot, but we've got a long branch. */
8810 return "add%I1 %1,%0,%0\n\tb %3";
8813 output_asm_insn ("add%I1 %1,%0,%0", operands);
8814 return pa_output_lbranch (operands[3], insn, 1);
8817 /* We use this hook to perform a PA specific optimization which is difficult
8818 to do in earlier passes. */
8820 static void
8821 pa_reorg (void)
8823 remove_useless_addtr_insns (1);
8825 if (pa_cpu < PROCESSOR_8000)
8826 pa_combine_instructions ();
8829 /* The PA has a number of odd instructions which can perform multiple
8830 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8831 it may be profitable to combine two instructions into one instruction
8832 with two outputs. It's not profitable on PA2.0 machines because the
8833 two outputs would take two slots in the reorder buffers.
8835 This routine finds instructions which can be combined and combines
8836 them. We only support some of the potential combinations, and we
8837 only try common ways to find suitable instructions.
8839 * addb can add two registers or a register and a small integer
8840 and jump to a nearby (+-8k) location. Normally the jump to the
8841 nearby location is conditional on the result of the add, but by
8842 using the "true" condition we can make the jump unconditional.
8843 Thus addb can perform two independent operations in one insn.
8845 * movb is similar to addb in that it can perform a reg->reg
8846 or small immediate->reg copy and jump to a nearby (+-8k) location.
8848 * fmpyadd and fmpysub can perform a FP multiply and either an
8849 FP add or FP sub if the operands of the multiply and add/sub are
8850 independent (there are other minor restrictions). Note both
8851 the fmpy and fadd/fsub can in theory move to better spots according
8852 to data dependencies, but for now we require the fmpy stay at a
8853 fixed location.
8855 * Many of the memory operations can perform pre & post updates
8856 of index registers. GCC's pre/post increment/decrement addressing
8857 is far too simple to take advantage of all the possibilities. This
8858 pass may not be suitable since those insns may not be independent.
8860 * comclr can compare two integer registers, or a register and a small integer, nullify
8861 the following instruction and zero some other register. This
8862 is more difficult to use as it's harder to find an insn which
8863 will generate a comclr than finding something like an unconditional
8864 branch. (conditional moves & long branches create comclr insns).
8866 * Most arithmetic operations can conditionally skip the next
8867 instruction. They can be viewed as "perform this operation
8868 and conditionally jump to this nearby location" (where nearby
8869 is a few insns away). These are difficult to use due to the
8870 branch length restrictions. */
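/* Illustrative example (editorial addition; registers and label are
   hypothetical): as a concrete case of the movb combination described
   above, a floating copy and an anchor unconditional branch

       copy %r4,%r26
       b L$0012

   can be fused into the single insn

       movb,tr %r4,%r26,L$0012

   using the always-true completer, the same form emitted by
   pa_output_parallel_movb below when the combined insn is in reach.  */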
8872 static void
8873 pa_combine_instructions (void)
8875 rtx_insn *anchor;
8877 /* This can get expensive since the basic algorithm is on the
8878 order of O(n^2) (or worse). Only do it for -O2 or higher
8879 levels of optimization. */
8880 if (optimize < 2)
8881 return;
8883 /* Walk down the list of insns looking for "anchor" insns which
8884 may be combined with "floating" insns. As the name implies,
8885 "anchor" instructions don't move, while "floating" insns may
8886 move around. */
8887 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8888 rtx_insn *new_rtx = make_insn_raw (par);
8890 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8892 enum attr_pa_combine_type anchor_attr;
8893 enum attr_pa_combine_type floater_attr;
8895 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8896 Also ignore any special USE insns. */
8897 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
8898 || GET_CODE (PATTERN (anchor)) == USE
8899 || GET_CODE (PATTERN (anchor)) == CLOBBER)
8900 continue;
8902 anchor_attr = get_attr_pa_combine_type (anchor);
8903 /* See if anchor is an insn suitable for combination. */
8904 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8905 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8906 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8907 && ! forward_branch_p (anchor)))
8909 rtx_insn *floater;
8911 for (floater = PREV_INSN (anchor);
8912 floater;
8913 floater = PREV_INSN (floater))
8915 if (NOTE_P (floater)
8916 || (NONJUMP_INSN_P (floater)
8917 && (GET_CODE (PATTERN (floater)) == USE
8918 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8919 continue;
8921 /* Anything except a regular INSN will stop our search. */
8922 if (! NONJUMP_INSN_P (floater))
8924 floater = NULL;
8925 break;
8928 /* See if FLOATER is suitable for combination with the
8929 anchor. */
8930 floater_attr = get_attr_pa_combine_type (floater);
8931 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8932 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8933 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8934 && floater_attr == PA_COMBINE_TYPE_FMPY))
8936 /* If ANCHOR and FLOATER can be combined, then we're
8937 done with this pass. */
8938 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8939 SET_DEST (PATTERN (floater)),
8940 XEXP (SET_SRC (PATTERN (floater)), 0),
8941 XEXP (SET_SRC (PATTERN (floater)), 1)))
8942 break;
8945 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8946 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8948 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8950 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8951 SET_DEST (PATTERN (floater)),
8952 XEXP (SET_SRC (PATTERN (floater)), 0),
8953 XEXP (SET_SRC (PATTERN (floater)), 1)))
8954 break;
8956 else
8958 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8959 SET_DEST (PATTERN (floater)),
8960 SET_SRC (PATTERN (floater)),
8961 SET_SRC (PATTERN (floater))))
8962 break;
8967 /* If we didn't find anything on the backwards scan try forwards. */
8968 if (!floater
8969 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8970 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8972 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8974 if (NOTE_P (floater)
8975 || (NONJUMP_INSN_P (floater)
8976 && (GET_CODE (PATTERN (floater)) == USE
8977 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8979 continue;
8981 /* Anything except a regular INSN will stop our search. */
8982 if (! NONJUMP_INSN_P (floater))
8984 floater = NULL;
8985 break;
8988 /* See if FLOATER is suitable for combination with the
8989 anchor. */
8990 floater_attr = get_attr_pa_combine_type (floater);
8991 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8992 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8993 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8994 && floater_attr == PA_COMBINE_TYPE_FMPY))
8996 /* If ANCHOR and FLOATER can be combined, then we're
8997 done with this pass. */
8998 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
8999 SET_DEST (PATTERN (floater)),
9000 XEXP (SET_SRC (PATTERN (floater)),
9001 0),
9002 XEXP (SET_SRC (PATTERN (floater)),
9003 1)))
9004 break;
9009 /* FLOATER will be nonzero if we found a suitable floating
9010 insn for combination with ANCHOR. */
9011 if (floater
9012 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9013 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9015 /* Emit the new instruction and delete the old anchor. */
9016 emit_insn_before (gen_rtx_PARALLEL
9017 (VOIDmode,
9018 gen_rtvec (2, PATTERN (anchor),
9019 PATTERN (floater))),
9020 anchor);
9022 SET_INSN_DELETED (anchor);
9024 /* Emit a special USE insn for FLOATER, then delete
9025 the floating insn. */
9026 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9027 delete_insn (floater);
9029 continue;
9031 else if (floater
9032 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9034 rtx temp;
9035 /* Emit the new jump instruction and delete the old anchor. */
9036 temp
9037 = emit_jump_insn_before (gen_rtx_PARALLEL
9038 (VOIDmode,
9039 gen_rtvec (2, PATTERN (anchor),
9040 PATTERN (floater))),
9041 anchor);
9043 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9044 SET_INSN_DELETED (anchor);
9046 /* Emit a special USE insn for FLOATER, then delete
9047 the floating insn. */
9048 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9049 delete_insn (floater);
9050 continue;
9056 static int
9057 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9058 int reversed, rtx dest,
9059 rtx src1, rtx src2)
9061 int insn_code_number;
9062 rtx_insn *start, *end;
9064 /* Create a PARALLEL with the patterns of ANCHOR and
9065 FLOATER, try to recognize it, then test constraints
9066 for the resulting pattern.
9068 If the pattern doesn't match or the constraints
9069 aren't met keep searching for a suitable floater
9070 insn. */
9071 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9072 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9073 INSN_CODE (new_rtx) = -1;
9074 insn_code_number = recog_memoized (new_rtx);
9075 basic_block bb = BLOCK_FOR_INSN (anchor);
9076 if (insn_code_number < 0
9077 || (extract_insn (new_rtx),
9078 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9079 return 0;
9081 if (reversed)
9083 start = anchor;
9084 end = floater;
9086 else
9088 start = floater;
9089 end = anchor;
9092 /* There are up to three operands to consider: one
9093 output and two inputs.
9095 The output must not be used between FLOATER & ANCHOR
9096 exclusive. The inputs must not be set between
9097 FLOATER and ANCHOR exclusive. */
9099 if (reg_used_between_p (dest, start, end))
9100 return 0;
9102 if (reg_set_between_p (src1, start, end))
9103 return 0;
9105 if (reg_set_between_p (src2, start, end))
9106 return 0;
9108 /* If we get here, then everything is good. */
9109 return 1;
9112 /* Return nonzero if references for INSN are delayed.
9114 Millicode insns are actually function calls with some special
9115 constraints on arguments and register usage.
9117 Millicode calls always expect their arguments in the integer argument
9118 registers, and always return their result in %r29 (ret1). They
9119 are expected to clobber their arguments, %r1, %r29, and the return
9120 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9122 This function tells reorg that the references to the arguments of a
9123 millicode call do not appear to happen until after the millicode call.
9124 This allows reorg to put insns which set the argument registers into the
9125 delay slot of the millicode call -- thus they act more like traditional
9126 CALL_INSNs.
9128 Note we cannot consider side effects of the insn to be delayed because
9129 the branch and link insn will clobber the return pointer. If we happened
9130 to use the return pointer in the delay slot of the call, then we lose.
9132 get_attr_type will try to recognize the given insn, so make sure to
9133 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9134 in particular. */
9135 int
9136 pa_insn_refs_are_delayed (rtx_insn *insn)
9138 return ((NONJUMP_INSN_P (insn)
9139 && GET_CODE (PATTERN (insn)) != SEQUENCE
9140 && GET_CODE (PATTERN (insn)) != USE
9141 && GET_CODE (PATTERN (insn)) != CLOBBER
9142 && get_attr_type (insn) == TYPE_MILLI));
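/* Illustrative example (editorial addition; the millicode routine and
   operand value are hypothetical): because the argument references of
   a TYPE_MILLI call are treated as delayed, reorg may transform

       ldi 10,%r25
       bl $$divI,%r31

   into

       bl $$divI,%r31
       ldi 10,%r25

   placing the divisor setup in the delay slot; the load still
   completes before the millicode body reads %r25.  */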
9145 /* Promote the return value, but not the arguments. */
9147 static machine_mode
9148 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9149 machine_mode mode,
9150 int *punsignedp ATTRIBUTE_UNUSED,
9151 const_tree fntype ATTRIBUTE_UNUSED,
9152 int for_return)
9154 if (for_return == 0)
9155 return mode;
9156 return promote_mode (type, mode, punsignedp);
9159 /* On the HP-PA the value is found in register(s) 28(-29), unless
9160 the mode is SF or DF. Then the value is returned in fr4 (32).
9162 This must perform the same promotions as PROMOTE_MODE, else promoting
9163 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9165 Small structures must be returned in a PARALLEL on PA64 in order
9166 to match the HP Compiler ABI. */
9168 static rtx
9169 pa_function_value (const_tree valtype,
9170 const_tree func ATTRIBUTE_UNUSED,
9171 bool outgoing ATTRIBUTE_UNUSED)
9173 machine_mode valmode;
9175 if (AGGREGATE_TYPE_P (valtype)
9176 || TREE_CODE (valtype) == COMPLEX_TYPE
9177 || TREE_CODE (valtype) == VECTOR_TYPE)
9179 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9181 /* Handle aggregates that fit exactly in a word or double word. */
9182 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9183 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9185 if (TARGET_64BIT)
9187 /* Aggregates with a size less than or equal to 128 bits are
9188 returned in GR 28(-29). They are left justified. The pad
9189 bits are undefined. Larger aggregates are returned in
9190 memory. */
9191 rtx loc[2];
9192 int i, offset = 0;
9193 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9195 for (i = 0; i < ub; i++)
9197 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9198 gen_rtx_REG (DImode, 28 + i),
9199 GEN_INT (offset));
9200 offset += 8;
9203 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9205 else if (valsize > UNITS_PER_WORD)
9207 /* Aggregates 5 to 8 bytes in size are returned in general
9208 registers r28-r29 in the same manner as other non
9209 floating-point objects. The data is right-justified and
9210 zero-extended to 64 bits. This is opposite to the normal
9211 justification used on big endian targets and requires
9212 special treatment. */
9213 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9214 gen_rtx_REG (DImode, 28), const0_rtx);
9215 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9219 if ((INTEGRAL_TYPE_P (valtype)
9220 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9221 || POINTER_TYPE_P (valtype))
9222 valmode = word_mode;
9223 else
9224 valmode = TYPE_MODE (valtype);
9226 if (TREE_CODE (valtype) == REAL_TYPE
9227 && !AGGREGATE_TYPE_P (valtype)
9228 && TYPE_MODE (valtype) != TFmode
9229 && !TARGET_SOFT_FLOAT)
9230 return gen_rtx_REG (valmode, 32);
9232 return gen_rtx_REG (valmode, 28);
9235 /* Implement the TARGET_LIBCALL_VALUE hook. */
9237 static rtx
9238 pa_libcall_value (machine_mode mode,
9239 const_rtx fun ATTRIBUTE_UNUSED)
9241 if (! TARGET_SOFT_FLOAT
9242 && (mode == SFmode || mode == DFmode))
9243 return gen_rtx_REG (mode, 32);
9244 else
9245 return gen_rtx_REG (mode, 28);
9248 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9250 static bool
9251 pa_function_value_regno_p (const unsigned int regno)
9253 if (regno == 28
9254 || (! TARGET_SOFT_FLOAT && regno == 32))
9255 return true;
9257 return false;
9260 /* Update the data in CUM to advance over an argument
9261 of mode MODE and data type TYPE.
9262 (TYPE is null for libcalls where that information may not be available.) */
9264 static void
9265 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9266 const_tree type, bool named ATTRIBUTE_UNUSED)
9268 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9269 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9271 cum->nargs_prototype--;
9272 cum->words += (arg_size
9273 + ((cum->words & 01)
9274 && type != NULL_TREE
9275 && arg_size > 1));
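/* Illustrative example (editorial addition): the advance above adds
   one pad word when a multi-word argument with type information
   (i.e., not a libcall operand) would otherwise start at an odd word
   offset.  With cum->words == 1, a DFmode argument (arg_size == 2)
   advances the count by 3 -- one word of padding plus two data words
   -- while with cum->words == 0 it advances by exactly 2.  */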
9278 /* Return the location of a parameter that is passed in a register or NULL
9279 if the parameter has any component that is passed in memory.
9281 This is new code and will be pushed into the net sources after
9282 further testing.
9284 ??? We might want to restructure this so that it looks more like other
9285 ports. */
9286 static rtx
9287 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9288 const_tree type, bool named ATTRIBUTE_UNUSED)
9290 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9291 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9292 int alignment = 0;
9293 int arg_size;
9294 int fpr_reg_base;
9295 int gpr_reg_base;
9296 rtx retval;
9298 if (mode == VOIDmode)
9299 return NULL_RTX;
9301 arg_size = FUNCTION_ARG_SIZE (mode, type);
9303 /* If this arg would be passed partially or totally on the stack, then
9304 this routine should return zero. pa_arg_partial_bytes will
9305 handle arguments which are split between regs and stack slots if
9306 the ABI mandates split arguments. */
9307 if (!TARGET_64BIT)
9309 /* The 32-bit ABI does not split arguments. */
9310 if (cum->words + arg_size > max_arg_words)
9311 return NULL_RTX;
9313 else
9315 if (arg_size > 1)
9316 alignment = cum->words & 1;
9317 if (cum->words + alignment >= max_arg_words)
9318 return NULL_RTX;
9321 /* The 32bit ABIs and the 64bit ABIs are rather different,
9322 particularly in their handling of FP registers. We might
9323 be able to cleverly share code between them, but I'm not
9324 going to bother in the hope that splitting them up results
9325 in code that is more easily understood. */
9327 if (TARGET_64BIT)
9329 /* Advance the base registers to their current locations.
9331 Remember, gprs grow towards smaller register numbers while
9332 fprs grow to higher register numbers. Also remember that
9333 although FP regs are 32-bit addressable, we pretend that
9334 the registers are 64-bits wide. */
9335 gpr_reg_base = 26 - cum->words;
9336 fpr_reg_base = 32 + cum->words;
9338 /* Arguments wider than one word and small aggregates need special
9339 treatment. */
9340 if (arg_size > 1
9341 || mode == BLKmode
9342 || (type && (AGGREGATE_TYPE_P (type)
9343 || TREE_CODE (type) == COMPLEX_TYPE
9344 || TREE_CODE (type) == VECTOR_TYPE)))
9346 /* Double-extended precision (80-bit), quad-precision (128-bit)
9347 and aggregates including complex numbers are aligned on
9348 128-bit boundaries. The first eight 64-bit argument slots
9349 are associated one-to-one with general registers r26
9350 through r19, and also with floating-point registers fr4
9351 through fr11. Arguments larger than one word are always
9352 passed in general registers.
9354 Using a PARALLEL with a word mode register results in left
9355 justified data on a big-endian target. */
9357 rtx loc[8];
9358 int i, offset = 0, ub = arg_size;
9360 /* Align the base register. */
9361 gpr_reg_base -= alignment;
9363 ub = MIN (ub, max_arg_words - cum->words - alignment);
9364 for (i = 0; i < ub; i++)
9366 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9367 gen_rtx_REG (DImode, gpr_reg_base),
9368 GEN_INT (offset));
9369 gpr_reg_base -= 1;
9370 offset += 8;
9373 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9376 else
9378 /* If the argument is larger than a word, then we know precisely
9379 which registers we must use. */
9380 if (arg_size > 1)
9382 if (cum->words)
9384 gpr_reg_base = 23;
9385 fpr_reg_base = 38;
9387 else
9389 gpr_reg_base = 25;
9390 fpr_reg_base = 34;
9393 /* Structures 5 to 8 bytes in size are passed in the general
9394 registers in the same manner as other non floating-point
9395 objects. The data is right-justified and zero-extended
9396 to 64 bits. This is opposite to the normal justification
9397 used on big endian targets and requires special treatment.
9398 We now define BLOCK_REG_PADDING to pad these objects.
9399 Aggregates, complex and vector types are passed in the same
9400 manner as structures. */
9401 if (mode == BLKmode
9402 || (type && (AGGREGATE_TYPE_P (type)
9403 || TREE_CODE (type) == COMPLEX_TYPE
9404 || TREE_CODE (type) == VECTOR_TYPE)))
9406 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9407 gen_rtx_REG (DImode, gpr_reg_base),
9408 const0_rtx);
9409 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9412 else
9414 /* We have a single word (32 bits). A simple computation
9415 will get us the register #s we need. */
9416 gpr_reg_base = 26 - cum->words;
9417 fpr_reg_base = 32 + 2 * cum->words;
9421 /* Determine if the argument needs to be passed in both general and
9422 floating point registers. */
9423 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9424 /* If we are doing soft-float with portable runtime, then there
9425 is no need to worry about FP regs. */
9426 && !TARGET_SOFT_FLOAT
9427 /* The parameter must be some kind of scalar float, else we just
9428 pass it in integer registers. */
9429 && GET_MODE_CLASS (mode) == MODE_FLOAT
9430 /* The target function must not have a prototype. */
9431 && cum->nargs_prototype <= 0
9432 /* libcalls do not need to pass items in both FP and general
9433 registers. */
9434 && type != NULL_TREE
9435 /* All this hair applies to "outgoing" args only. This includes
9436 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9437 && !cum->incoming)
9438 /* Also pass outgoing floating arguments in both registers in indirect
9439 calls with the 32 bit ABI and the HP assembler since there is no
9440 way to specify the argument locations in static functions.
9441 || (!TARGET_64BIT
9442 && !TARGET_GAS
9443 && !cum->incoming
9444 && cum->indirect
9445 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9447 retval
9448 = gen_rtx_PARALLEL
9449 (mode,
9450 gen_rtvec (2,
9451 gen_rtx_EXPR_LIST (VOIDmode,
9452 gen_rtx_REG (mode, fpr_reg_base),
9453 const0_rtx),
9454 gen_rtx_EXPR_LIST (VOIDmode,
9455 gen_rtx_REG (mode, gpr_reg_base),
9456 const0_rtx)));
9458 else
9460 /* See if we should pass this parameter in a general register. */
9461 if (TARGET_SOFT_FLOAT
9462 /* Indirect calls in the normal 32-bit ABI require all arguments
9463 to be passed in general registers. */
9464 || (!TARGET_PORTABLE_RUNTIME
9465 && !TARGET_64BIT
9466 && !TARGET_ELF32
9467 && cum->indirect)
9468 /* If the parameter is not a scalar floating-point parameter,
9469 then it belongs in GPRs. */
9470 || GET_MODE_CLASS (mode) != MODE_FLOAT
9471 /* Aggregates, even a structure with a single SFmode field, belong in GPRs. */
9472 || (type && AGGREGATE_TYPE_P (type)))
9473 retval = gen_rtx_REG (mode, gpr_reg_base);
9474 else
9475 retval = gen_rtx_REG (mode, fpr_reg_base);
9477 return retval;
9480 /* Arguments larger than one word are double-word aligned. */
9482 static unsigned int
9483 pa_function_arg_boundary (machine_mode mode, const_tree type)
9485 bool singleword = (type
9486 ? (integer_zerop (TYPE_SIZE (type))
9487 || !TREE_CONSTANT (TYPE_SIZE (type))
9488 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9489 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9491 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
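/* For example, on the 32-bit port an SImode argument fits in a single
   word and gets PARM_BOUNDARY, while a DImode argument or an aggregate
   larger than UNITS_PER_WORD gets MAX_PARM_BOUNDARY.  Zero-sized and
   variable-sized types are treated as single words.  */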
9494 /* If this arg would be passed totally in registers or totally on the stack,
9495 then this routine should return zero. */
9497 static int
9498 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9499 tree type, bool named ATTRIBUTE_UNUSED)
9501 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9502 unsigned int max_arg_words = 8;
9503 unsigned int offset = 0;
9505 if (!TARGET_64BIT)
9506 return 0;
9508 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9509 offset = 1;
9511 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9512 /* Arg fits fully into registers. */
9513 return 0;
9514 else if (cum->words + offset >= max_arg_words)
9515 /* Arg fully on the stack. */
9516 return 0;
9517 else
9518 /* Arg is split. */
9519 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
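/* Worked example for the split case: with cum->words == 6 and a
   4-word argument, offset remains 0 (6 is even), 6 + 0 + 4 exceeds
   the 8 argument slots, and 6 + 0 is still below 8.  Hence
   (8 - 6 - 0) * UNITS_PER_WORD = 16 bytes are passed in registers
   and the remaining 16 bytes go on the stack.  */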
9523 /* A get_unnamed_section callback for switching to the text section.
9525 This function is only used with SOM. Because we don't support
9526 named subspaces, we can only create a new subspace or switch back
9527 to the default text subspace. */
9529 static void
9530 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9532 gcc_assert (TARGET_SOM);
9533 if (TARGET_GAS)
9535 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9537 /* We only want to emit a .nsubspa directive once at the
9538 start of the function. */
9539 cfun->machine->in_nsubspa = 1;
9541 /* Create a new subspace for the text. This provides
9542 better stub placement and one-only functions. */
9543 if (cfun->decl
9544 && DECL_ONE_ONLY (cfun->decl)
9545 && !DECL_WEAK (cfun->decl))
9547 output_section_asm_op ("\t.SPACE $TEXT$\n"
9548 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9549 "ACCESS=44,SORT=24,COMDAT");
9550 return;
9553 else
9555 /* There isn't a current function or the body of the current
9556 function has been completed. So, we are changing to the
9557 text section to output debugging information. Thus, we
9558 need to forget that we are in the text section so that
9559 varasm.c will call us when text_section is selected again. */
9560 gcc_assert (!cfun || !cfun->machine
9561 || cfun->machine->in_nsubspa == 2);
9562 in_section = NULL;
9564 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9565 return;
9567 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9570 /* A get_unnamed_section callback for switching to comdat data
9571 sections. This function is only used with SOM. */
9573 static void
9574 som_output_comdat_data_section_asm_op (const void *data)
9576 in_section = NULL;
9577 output_section_asm_op (data);
9580 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9582 static void
9583 pa_som_asm_init_sections (void)
9585 text_section
9586 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9588 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9589 is not being generated. */
9590 som_readonly_data_section
9591 = get_unnamed_section (0, output_section_asm_op,
9592 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9594 /* When secondary definitions are not supported, SOM makes readonly
9595 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9596 the comdat flag. */
9597 som_one_only_readonly_data_section
9598 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9599 "\t.SPACE $TEXT$\n"
9600 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9601 "ACCESS=0x2c,SORT=16,COMDAT");
9604 /* When secondary definitions are not supported, SOM makes data one-only
9605 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9606 som_one_only_data_section
9607 = get_unnamed_section (SECTION_WRITE,
9608 som_output_comdat_data_section_asm_op,
9609 "\t.SPACE $PRIVATE$\n"
9610 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9611 "ACCESS=31,SORT=24,COMDAT");
9613 if (flag_tm)
9614 som_tm_clone_table_section
9615 = get_unnamed_section (0, output_section_asm_op,
9616 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9618 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9619 which reference data within the $TEXT$ space (for example constant
9620 strings in the $LIT$ subspace).
9622 The assemblers (GAS and HP as) both have problems with handling
9623 the difference of two symbols which is the other correct way to
9624 reference constant data during PIC code generation.
9626 So, there's no way to reference constant data which is in the
9627 $TEXT$ space during PIC generation. Instead place all constant
9628 data into the $PRIVATE$ subspace (this reduces sharing, but it
9629 works correctly). */
9630 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9632 /* We must not have a reference to an external symbol defined in a
9633 shared library in a readonly section, else the SOM linker will
9634 complain.
9636 So, we force exception information into the data section. */
9637 exception_section = data_section;
9640 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9642 static section *
9643 pa_som_tm_clone_table_section (void)
9645 return som_tm_clone_table_section;
9648 /* On hpux10, the linker will give an error if we have a reference
9649 in the read-only data section to a symbol defined in a shared
9650 library. Therefore, expressions that might require a reloc can
9651 not be placed in the read-only data section. */
9653 static section *
9654 pa_select_section (tree exp, int reloc,
9655 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9657 if (TREE_CODE (exp) == VAR_DECL
9658 && TREE_READONLY (exp)
9659 && !TREE_THIS_VOLATILE (exp)
9660 && DECL_INITIAL (exp)
9661 && (DECL_INITIAL (exp) == error_mark_node
9662 || TREE_CONSTANT (DECL_INITIAL (exp)))
9663 && !reloc)
9665 if (TARGET_SOM
9666 && DECL_ONE_ONLY (exp)
9667 && !DECL_WEAK (exp))
9668 return som_one_only_readonly_data_section;
9669 else
9670 return readonly_data_section;
9672 else if (CONSTANT_CLASS_P (exp) && !reloc)
9673 return readonly_data_section;
9674 else if (TARGET_SOM
9675 && TREE_CODE (exp) == VAR_DECL
9676 && DECL_ONE_ONLY (exp)
9677 && !DECL_WEAK (exp))
9678 return som_one_only_data_section;
9679 else
9680 return data_section;
9683 static void
9684 pa_globalize_label (FILE *stream, const char *name)
9686 /* We only handle DATA objects here, functions are globalized in
9687 ASM_DECLARE_FUNCTION_NAME. */
9688 if (! FUNCTION_NAME_P (name))
9690 fputs ("\t.EXPORT ", stream);
9691 assemble_name (stream, name);
9692 fputs (",DATA\n", stream);
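/* For a data symbol the emitted directive looks like

       .EXPORT foo,DATA

   where "foo" stands for the assembled name (illustrative
   placeholder).  */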
9696 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9698 static rtx
9699 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9700 int incoming ATTRIBUTE_UNUSED)
9702 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9705 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9707 bool
9708 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9710 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9711 PA64 ABI says that objects larger than 128 bits are returned in memory.
9712 Note, int_size_in_bytes can return -1 if the size of the object is
9713 variable or larger than the maximum value that can be expressed as
9714 a HOST_WIDE_INT. It can also return zero for an empty type. The
9715 simplest way to handle variable and empty types is to pass them in
9716 memory. This avoids problems in defining the boundaries of argument
9717 slots, allocating registers, etc. */
9718 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9719 || int_size_in_bytes (type) <= 0);
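/* For example, on the 32-bit target a 12-byte structure (larger than
   64 bits) is returned in memory while an 8-byte structure is
   returned in registers; a variable-sized or empty type yields a
   non-positive int_size_in_bytes and is likewise returned in
   memory.  */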
9722 /* Structure to hold declaration and name of external symbols that are
9723 emitted by GCC. We generate a vector of these symbols and output them
9724 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9725 This avoids putting out names that are never really used. */
9727 typedef struct GTY(()) extern_symbol
9729 tree decl;
9730 const char *name;
9731 } extern_symbol;
9733 /* Define gc'd vector type for extern_symbol. */
9735 /* Vector of extern_symbol structures. */
9736 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9738 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9739 /* Mark DECL (name NAME) as an external reference (assembler output
9740 file FILE). This saves the names to output at the end of the file
9741 if actually referenced. */
9743 void
9744 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9746 gcc_assert (file == asm_out_file);
9747 extern_symbol p = {decl, name};
9748 vec_safe_push (extern_symbols, p);
9751 /* Output text required at the end of an assembler file.
9752 This includes deferred profile counters, deferred plabels, and
9753 .import directives for all external symbols actually referenced.
9755 static void
9756 pa_hpux_file_end (void)
9758 unsigned int i;
9759 extern_symbol *p;
9761 if (!NO_DEFERRED_PROFILE_COUNTERS)
9762 output_deferred_profile_counters ();
9764 output_deferred_plabels ();
9766 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9768 tree decl = p->decl;
9770 if (!TREE_ASM_WRITTEN (decl)
9771 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9772 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9775 vec_free (extern_symbols);
9777 #endif
9779 /* Return true if a change from mode FROM to mode TO for a register
9780 in register class RCLASS is invalid. */
9782 bool
9783 pa_cannot_change_mode_class (machine_mode from, machine_mode to,
9784 enum reg_class rclass)
9786 if (from == to)
9787 return false;
9789 /* Reject changes to/from complex and vector modes. */
9790 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9791 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9792 return true;
9794 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9795 return false;
9797 /* There is no way to load QImode or HImode values directly from
9798 memory. SImode loads to the FP registers are not zero extended.
9799 On the 64-bit target, this conflicts with the definition of
9800 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9801 with different sizes in the floating-point registers. */
9802 if (MAYBE_FP_REG_CLASS_P (rclass))
9803 return true;
9805 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9806 in specific sets of registers. Thus, we cannot allow changing
9807 to a larger mode when it's larger than a word. */
9808 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9809 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9810 return true;
9812 return false;
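/* An illustrative summary of the tests above: SFmode <-> SImode
   (equal sizes) is allowed for every class, while SImode -> DImode is
   rejected for the floating-point classes by MAYBE_FP_REG_CLASS_P
   and, on the 32-bit target, for the general registers because DImode
   is wider than a word.  */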
9815 /* Returns TRUE if it is a good idea to tie two pseudo registers
9816 when one has mode MODE1 and one has mode MODE2.
9817 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9818 for any hard reg, then this must be FALSE for correct output.
9820 We should return FALSE for QImode and HImode because these modes
9821 are not ok in the floating-point registers. However, this prevents
9822 tying these modes to SImode and DImode in the general registers.
9823 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9824 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9825 in the floating-point registers. */
9827 bool
9828 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
9830 /* Don't tie modes in different classes. */
9831 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9832 return false;
9834 return true;
9838 /* Length in units of the trampoline instruction code. */
9840 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9843 /* Output assembler code for a block containing the constant parts
9844 of a trampoline, leaving space for the variable parts.
9846 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9847 and then branches to the specified routine.
9849 This code template is copied from the text segment to a stack
9850 location, then patched by pa_trampoline_init to contain valid
9851 values, and then entered as a subroutine.
9853 It is best to keep this as small as possible to avoid having to
9854 flush multiple lines in the cache. */
9856 static void
9857 pa_asm_trampoline_template (FILE *f)
9859 if (!TARGET_64BIT)
9861 fputs ("\tldw 36(%r22),%r21\n", f);
9862 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
9863 if (ASSEMBLER_DIALECT == 0)
9864 fputs ("\tdepi 0,31,2,%r21\n", f);
9865 else
9866 fputs ("\tdepwi 0,31,2,%r21\n", f);
9867 fputs ("\tldw 4(%r21),%r19\n", f);
9868 fputs ("\tldw 0(%r21),%r21\n", f);
9869 if (TARGET_PA_20)
9871 fputs ("\tbve (%r21)\n", f);
9872 fputs ("\tldw 40(%r22),%r29\n", f);
9873 fputs ("\t.word 0\n", f);
9874 fputs ("\t.word 0\n", f);
9876 else
9878 fputs ("\tldsid (%r21),%r1\n", f);
9879 fputs ("\tmtsp %r1,%sr0\n", f);
9880 fputs ("\tbe 0(%sr0,%r21)\n", f);
9881 fputs ("\tldw 40(%r22),%r29\n", f);
9883 fputs ("\t.word 0\n", f);
9884 fputs ("\t.word 0\n", f);
9885 fputs ("\t.word 0\n", f);
9886 fputs ("\t.word 0\n", f);
9888 else
9890 fputs ("\t.dword 0\n", f);
9891 fputs ("\t.dword 0\n", f);
9892 fputs ("\t.dword 0\n", f);
9893 fputs ("\t.dword 0\n", f);
9894 fputs ("\tmfia %r31\n", f);
9895 fputs ("\tldd 24(%r31),%r1\n", f);
9896 fputs ("\tldd 24(%r1),%r27\n", f);
9897 fputs ("\tldd 16(%r1),%r1\n", f);
9898 fputs ("\tbve (%r1)\n", f);
9899 fputs ("\tldd 32(%r31),%r31\n", f);
9900 fputs ("\t.dword 0 ; fptr\n", f);
9901 fputs ("\t.dword 0 ; static link\n", f);
9905 /* Emit RTL insns to initialize the variable parts of a trampoline.
9906 FNADDR is an RTX for the address of the function's pure code.
9907 CXT is an RTX for the static chain value for the function.
9909 Move the function address to the trampoline template at offset 36.
9910 Move the static chain value to the trampoline template at offset 40.
9911 Move the trampoline address to the trampoline template at offset 44.
9912 Move r19 to trampoline template at offset 48. The latter two
9913 words create a plabel for the indirect call to the trampoline.
9915 A similar sequence is used for the 64-bit port but the plabel is
9916 at the beginning of the trampoline.
9918 Finally, the cache entries for the trampoline code are flushed.
9919 This is necessary to ensure that the trampoline instruction sequence
9920 is written to memory prior to any attempts at prefetching the code
9921 sequence. */
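/* Sketch of the variable words patched in below (offsets taken from
   the code in this function):

     32-bit: +36 function address, +40 static chain, +44 trampoline
             address and +48 %r19 (the last two form the plabel);
     64-bit: +16 trampoline address + 32 and +24 %r27 (the plabel),
             +56 function address, +64 static chain.  */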
9923 static void
9924 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
9926 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9927 rtx start_addr = gen_reg_rtx (Pmode);
9928 rtx end_addr = gen_reg_rtx (Pmode);
9929 rtx line_length = gen_reg_rtx (Pmode);
9930 rtx r_tramp, tmp;
9932 emit_block_move (m_tramp, assemble_trampoline_template (),
9933 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
9934 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
9936 if (!TARGET_64BIT)
9938 tmp = adjust_address (m_tramp, Pmode, 36);
9939 emit_move_insn (tmp, fnaddr);
9940 tmp = adjust_address (m_tramp, Pmode, 40);
9941 emit_move_insn (tmp, chain_value);
9943 /* Create a fat pointer for the trampoline. */
9944 tmp = adjust_address (m_tramp, Pmode, 44);
9945 emit_move_insn (tmp, r_tramp);
9946 tmp = adjust_address (m_tramp, Pmode, 48);
9947 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
9949 /* fdc and fic only use registers for the address to flush,
9950 they do not accept integer displacements. We align the
9951 start and end addresses to the beginning of their respective
9952 cache lines to minimize the number of lines flushed. */
9953 emit_insn (gen_andsi3 (start_addr, r_tramp,
9954 GEN_INT (-MIN_CACHELINE_SIZE)));
9955 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
9956 TRAMPOLINE_CODE_SIZE-1));
9957 emit_insn (gen_andsi3 (end_addr, tmp,
9958 GEN_INT (-MIN_CACHELINE_SIZE)));
9959 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
9960 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
9961 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
9962 gen_reg_rtx (Pmode),
9963 gen_reg_rtx (Pmode)));
9965 else
9967 tmp = adjust_address (m_tramp, Pmode, 56);
9968 emit_move_insn (tmp, fnaddr);
9969 tmp = adjust_address (m_tramp, Pmode, 64);
9970 emit_move_insn (tmp, chain_value);
9972 /* Create a fat pointer for the trampoline. */
9973 tmp = adjust_address (m_tramp, Pmode, 16);
9974 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
9975 r_tramp, 32)));
9976 tmp = adjust_address (m_tramp, Pmode, 24);
9977 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
9979 /* fdc and fic only use registers for the address to flush,
9980 they do not accept integer displacements. We align the
9981 start and end addresses to the beginning of their respective
9982 cache lines to minimize the number of lines flushed. */
9983 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
9984 emit_insn (gen_anddi3 (start_addr, tmp,
9985 GEN_INT (-MIN_CACHELINE_SIZE)));
9986 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
9987 TRAMPOLINE_CODE_SIZE - 1));
9988 emit_insn (gen_anddi3 (end_addr, tmp,
9989 GEN_INT (-MIN_CACHELINE_SIZE)));
9990 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
9991 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
9992 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
9993 gen_reg_rtx (Pmode),
9994 gen_reg_rtx (Pmode)));
9997 #ifdef HAVE_ENABLE_EXECUTE_STACK
9998 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9999 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10000 #endif
10003 /* Perform any machine-specific adjustment in the address of the trampoline.
10004 ADDR contains the address that was passed to pa_trampoline_init.
10005 Adjust the trampoline address to point to the plabel at offset 44 (the offset of 46 used below also sets the low-order bit that marks the pointer as a plabel).
10007 static rtx
10008 pa_trampoline_adjust_address (rtx addr)
10010 if (!TARGET_64BIT)
10011 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10012 return addr;
10015 static rtx
10016 pa_delegitimize_address (rtx orig_x)
10018 rtx x = delegitimize_mem_from_attrs (orig_x);
10020 if (GET_CODE (x) == LO_SUM
10021 && GET_CODE (XEXP (x, 1)) == UNSPEC
10022 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10023 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10024 return x;
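/* For example, a PIC reference that was legitimized into

       (lo_sum (reg) (unspec [(symbol_ref "x")] UNSPEC_DLTIND14R))

   is rewritten back to (mem (symbol_ref "x")), so later consumers
   such as debug output see the underlying symbol ("x" is an
   illustrative placeholder).  */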
10027 static rtx
10028 pa_internal_arg_pointer (void)
10030 /* The argument pointer and the hard frame pointer are the same in
10031 the 32-bit runtime, so we don't need a copy. */
10032 if (TARGET_64BIT)
10033 return copy_to_reg (virtual_incoming_args_rtx);
10034 else
10035 return virtual_incoming_args_rtx;
10038 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10039 Frame pointer elimination is automatically handled. */
10041 static bool
10042 pa_can_eliminate (const int from, const int to)
10044 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10045 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10046 return false;
10048 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10049 ? ! frame_pointer_needed
10050 : true);
10053 /* Define the offset between two registers, FROM to be eliminated and its
10054 replacement TO, at the start of a routine. */
10055 HOST_WIDE_INT
10056 pa_initial_elimination_offset (int from, int to)
10058 HOST_WIDE_INT offset;
10060 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10061 && to == STACK_POINTER_REGNUM)
10062 offset = -pa_compute_frame_size (get_frame_size (), 0);
10063 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10064 offset = 0;
10065 else
10066 gcc_unreachable ();
10068 return offset;
10071 static void
10072 pa_conditional_register_usage (void)
10074 int i;
10076 if (!TARGET_64BIT && !TARGET_PA_11)
10078 for (i = 56; i <= FP_REG_LAST; i++)
10079 fixed_regs[i] = call_used_regs[i] = 1;
10080 for (i = 33; i < 56; i += 2)
10081 fixed_regs[i] = call_used_regs[i] = 1;
10083 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10085 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10086 fixed_regs[i] = call_used_regs[i] = 1;
10088 if (flag_pic)
10089 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10092 /* Target hook for c_mode_for_suffix. */
10094 static machine_mode
10095 pa_c_mode_for_suffix (char suffix)
10097 if (HPUX_LONG_DOUBLE_LIBRARY)
10099 if (suffix == 'q')
10100 return TFmode;
10103 return VOIDmode;
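/* Thus, when an HP-UX long-double library is in use, a literal such
   as 1.0q gets TFmode (128 bits); any other suffix returns VOIDmode,
   i.e. it is not recognized.  */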
10106 /* Target hook for function_section. */
10108 static section *
10109 pa_function_section (tree decl, enum node_frequency freq,
10110 bool startup, bool exit)
10112 /* Put functions in text section if target doesn't have named sections. */
10113 if (!targetm_common.have_named_sections)
10114 return text_section;
10116 /* Force nested functions into the same section as the containing
10117 function. */
10118 if (decl
10119 && DECL_SECTION_NAME (decl) == NULL
10120 && DECL_CONTEXT (decl) != NULL_TREE
10121 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10122 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10123 return function_section (DECL_CONTEXT (decl));
10125 /* Otherwise, use the default function section. */
10126 return default_function_section (decl, freq, startup, exit);
10129 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10131 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10132 that need more than three instructions to load prior to reload. This
10133 limit is somewhat arbitrary. It takes three instructions to load a
10134 CONST_INT from memory but two are memory accesses. It may be better
10135 to increase the allowed range for CONST_INTS. We may also be able
10136 to handle CONST_DOUBLES. */
10138 static bool
10139 pa_legitimate_constant_p (machine_mode mode, rtx x)
10141 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10142 return false;
10144 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10145 return false;
10147 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10148 legitimate constants. The other variants can't be handled by
10149 the move patterns after reload starts. */
10150 if (tls_referenced_p (x))
10151 return false;
10153 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10154 return false;
10156 if (TARGET_64BIT
10157 && HOST_BITS_PER_WIDE_INT > 32
10158 && GET_CODE (x) == CONST_INT
10159 && !reload_in_progress
10160 && !reload_completed
10161 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10162 && !pa_cint_ok_for_move (INTVAL (x)))
10163 return false;
10165 if (function_label_operand (x, mode))
10166 return false;
10168 return true;
10171 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10173 static unsigned int
10174 pa_section_type_flags (tree decl, const char *name, int reloc)
10176 unsigned int flags;
10178 flags = default_section_type_flags (decl, name, reloc);
10180 /* Function labels are placed in the constant pool. This can
10181 cause a section conflict if decls are put in ".data.rel.ro"
10182 or ".data.rel.ro.local" using the __attribute__ construct. */
10183 if (strcmp (name, ".data.rel.ro") == 0
10184 || strcmp (name, ".data.rel.ro.local") == 0)
10185 flags |= SECTION_WRITE | SECTION_RELRO;
10187 return flags;
10190 /* pa_legitimate_address_p recognizes an RTL expression that is a
10191 valid memory address for an instruction. The MODE argument is the
10192 machine mode for the MEM expression that wants to use this address.
10194 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10195 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10196 available with floating point loads and stores, and integer loads.
10197 We get better code by allowing indexed addresses in the initial
10198 RTL generation.
10200 The acceptance of indexed addresses as legitimate implies that we
10201 must provide patterns for doing indexed integer stores, or the move
10202 expanders must force the address of an indexed store to a register.
10203 We have adopted the latter approach.
10205 Another function of pa_legitimate_address_p is to ensure that
10206 the base register is a valid pointer for indexed instructions.
10207 On targets that have non-equivalent space registers, we have to
10208 know at the time of assembler output which register in a REG+REG
10209 pair is the base register. The REG_POINTER flag is sometimes lost
10210 in reload and the following passes, so it can't be relied on during
10211 code generation. Thus, we either have to canonicalize the order
10212 of the registers in REG+REG indexed addresses, or treat REG+REG
10213 addresses separately and provide patterns for both permutations.
10215 The latter approach requires several hundred additional lines of
10216 code in pa.md. The downside to canonicalizing is that a PLUS
10217 in the wrong order can't combine to form a scaled indexed
10218 memory operand. As we won't need to canonicalize the operands if
10219 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10221 We initially break out scaled indexed addresses in canonical order
10222 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10223 scaled indexed addresses during RTL generation. However, fold_rtx
10224 has its own opinion on how the operands of a PLUS should be ordered.
10225 If one of the operands is equivalent to a constant, it will make
10226 that operand the second operand. As the base register is likely to
10227 be equivalent to a SYMBOL_REF, we have made it the second operand.
10229 pa_legitimate_address_p accepts REG+REG as legitimate when the
10230 operands are in the order INDEX+BASE on targets with non-equivalent
10231 space registers, and in any order on targets with equivalent space
10232 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10234 We treat a SYMBOL_REF as legitimate if it is part of the current
10235 function's constant-pool, because such addresses can actually be
10236 output as REG+SMALLINT. */
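/* Illustrative forms accepted below (register numbers are
   placeholders):

       (reg %r4)                                base register
       (plus (reg %r4) (const_int 12))          REG+SMALLINT
       (plus (reg %r5) (reg %r4))               INDEX+BASE, base second
       (plus (mult (reg %r5) (const_int 4))
             (reg %r4))                         scaled, SImode access

   where the scale factor must equal the mode size of the access.  */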
10238 static bool
10239 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10241 if ((REG_P (x)
10242 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10243 : REG_OK_FOR_BASE_P (x)))
10244 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10245 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10246 && REG_P (XEXP (x, 0))
10247 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10248 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10249 return true;
10251 if (GET_CODE (x) == PLUS)
10253 rtx base, index;
10255 /* For REG+REG, the base register should be in XEXP (x, 1),
10256 so check it first. */
10257 if (REG_P (XEXP (x, 1))
10258 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10259 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10260 base = XEXP (x, 1), index = XEXP (x, 0);
10261 else if (REG_P (XEXP (x, 0))
10262 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10263 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10264 base = XEXP (x, 0), index = XEXP (x, 1);
10265 else
10266 return false;
10268 if (GET_CODE (index) == CONST_INT)
10270 if (INT_5_BITS (index))
10271 return true;
10273 /* When INT14_OK_STRICT is false, a secondary reload is needed
10274 to adjust the displacement of SImode and DImode floating point
10275 instructions but this may fail when the register also needs
10276 reloading. So, we return false when STRICT is true. We
10277 also reject long displacements for float mode addresses since
10278 the majority of accesses will use floating point instructions
10279 that don't support 14-bit offsets. */
10280 if (!INT14_OK_STRICT
10281 && (strict || !(reload_in_progress || reload_completed))
10282 && mode != QImode
10283 && mode != HImode)
10284 return false;
10286 return base14_operand (index, mode);
10289 if (!TARGET_DISABLE_INDEXING
10290 /* Only accept the "canonical" INDEX+BASE operand order
10291 on targets with non-equivalent space registers. */
10292 && (TARGET_NO_SPACE_REGS
10293 ? REG_P (index)
10294 : (base == XEXP (x, 1) && REG_P (index)
10295 && (reload_completed
10296 || (reload_in_progress && HARD_REGISTER_P (base))
10297 || REG_POINTER (base))
10298 && (reload_completed
10299 || (reload_in_progress && HARD_REGISTER_P (index))
10300 || !REG_POINTER (index))))
10301 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10302 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10303 : REG_OK_FOR_INDEX_P (index))
10304 && borx_reg_operand (base, Pmode)
10305 && borx_reg_operand (index, Pmode))
10306 return true;
10308 if (!TARGET_DISABLE_INDEXING
10309 && GET_CODE (index) == MULT
10310 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10311 && REG_P (XEXP (index, 0))
10312 && GET_MODE (XEXP (index, 0)) == Pmode
10313 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10314 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10315 && GET_CODE (XEXP (index, 1)) == CONST_INT
10316 && INTVAL (XEXP (index, 1))
10317 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10318 && borx_reg_operand (base, Pmode))
10319 return true;
10321 return false;
10324 if (GET_CODE (x) == LO_SUM)
10326 rtx y = XEXP (x, 0);
10328 if (GET_CODE (y) == SUBREG)
10329 y = SUBREG_REG (y);
10331 if (REG_P (y)
10332 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10333 : REG_OK_FOR_BASE_P (y)))
10335 /* Needed for -fPIC */
10336 if (mode == Pmode
10337 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10338 return true;
10340 if (!INT14_OK_STRICT
10341 && (strict || !(reload_in_progress || reload_completed))
10342 && mode != QImode
10343 && mode != HImode)
10344 return false;
10346 if (CONSTANT_P (XEXP (x, 1)))
10347 return true;
10349 return false;
10352 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10353 return true;
10355 return false;
10358 /* Look for machine dependent ways to make the invalid address AD a
10359 valid address.
10361 For the PA, transform:
10363 memory(X + <large int>)
10365 into:
10367 if (<large int> & mask) >= 16
10368 Y = (<large int> & ~mask) + mask + 1 Round up.
10369 else
10370 Y = (<large int> & ~mask) Round down.
10371 Z = X + Y
10372 memory (Z + (<large int> - Y));
10374 This makes reload inheritance and reload_cse work better since Z
10375 can be reused.
10377 There may be more opportunities to improve code with this hook. */
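/* Worked example: for an SFmode access with INT14_OK_STRICT false,
   the mask below is 0x1f.  Given memory (X + 100), 100 & 0x1f = 4,
   which is below the halfway point 16, so we round down to Y = 96 and
   rewrite the address as memory ((X + 96) + 4).  X + 96 is pushed as
   a reload that later references can inherit, and the residual
   displacement 4 fits the 5-bit FP load/store offset.  */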
10379 rtx
10380 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10381 int opnum, int type,
10382 int ind_levels ATTRIBUTE_UNUSED)
10384 long offset, newoffset, mask;
10385 rtx new_rtx, temp = NULL_RTX;
10387 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10388 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10390 if (optimize && GET_CODE (ad) == PLUS)
10391 temp = simplify_binary_operation (PLUS, Pmode,
10392 XEXP (ad, 0), XEXP (ad, 1));
10394 new_rtx = temp ? temp : ad;
10396 if (optimize
10397 && GET_CODE (new_rtx) == PLUS
10398 && GET_CODE (XEXP (new_rtx, 0)) == REG
10399 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10401 offset = INTVAL (XEXP ((new_rtx), 1));
10403 /* Choose rounding direction. Round up if we are >= halfway. */
10404 if ((offset & mask) >= ((mask + 1) / 2))
10405 newoffset = (offset & ~mask) + mask + 1;
10406 else
10407 newoffset = offset & ~mask;
10409 /* Ensure that long displacements are aligned. */
10410 if (mask == 0x3fff
10411 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10412 || (TARGET_64BIT && (mode) == DImode)))
10413 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10415 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10417 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10418 GEN_INT (newoffset));
10419 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10420 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10421 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10422 opnum, (enum reload_type) type);
10423 return ad;
10427 return NULL_RTX;
10430 /* Output address vector. */
10432 void
10433 pa_output_addr_vec (rtx lab, rtx body)
10435 int idx, vlen = XVECLEN (body, 0);
10437 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10438 if (TARGET_GAS)
10439 fputs ("\t.begin_brtab\n", asm_out_file);
10440 for (idx = 0; idx < vlen; idx++)
10442 ASM_OUTPUT_ADDR_VEC_ELT
10443 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10445 if (TARGET_GAS)
10446 fputs ("\t.end_brtab\n", asm_out_file);
10449 /* Output address difference vector. */
10451 void
10452 pa_output_addr_diff_vec (rtx lab, rtx body)
10454 rtx base = XEXP (XEXP (body, 0), 0);
10455 int idx, vlen = XVECLEN (body, 1);
10457 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10458 if (TARGET_GAS)
10459 fputs ("\t.begin_brtab\n", asm_out_file);
10460 for (idx = 0; idx < vlen; idx++)
10462 ASM_OUTPUT_ADDR_DIFF_ELT
10463 (asm_out_file,
10464 body,
10465 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10466 CODE_LABEL_NUMBER (base));
10468 if (TARGET_GAS)
10469 fputs ("\t.end_brtab\n", asm_out_file);
10472 #include "gt-pa.h"